1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops 10 // and generates target-independent LLVM-IR. 11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs 12 // of instructions in order to estimate the profitability of vectorization. 13 // 14 // The loop vectorizer combines consecutive loop iterations into a single 15 // 'wide' iteration. After this transformation the index is incremented 16 // by the SIMD vector width, and not by one. 17 // 18 // This pass has three parts: 19 // 1. The main loop pass that drives the different parts. 20 // 2. LoopVectorizationLegality - A unit that checks for the legality 21 // of the vectorization. 22 // 3. InnerLoopVectorizer - A unit that performs the actual 23 // widening of instructions. 24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability 25 // of vectorization. It decides on the optimal vector width, which 26 // can be one, if vectorization is not profitable. 27 // 28 // There is a development effort going on to migrate loop vectorizer to the 29 // VPlan infrastructure and to introduce outer loop vectorization support (see 30 // docs/Proposal/VectorizationPlan.rst and 31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this 32 // purpose, we temporarily introduced the VPlan-native vectorization path: an 33 // alternative vectorization path that is natively implemented on top of the 34 // VPlan infrastructure. See EnableVPlanNativePath for enabling. 35 // 36 //===----------------------------------------------------------------------===// 37 // 38 // The reduction-variable vectorization is based on the paper: 39 // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. 40 // 41 // Variable uniformity checks are inspired by: 42 // Karrenberg, R. and Hack, S. Whole Function Vectorization. 43 // 44 // The interleaved access vectorization is based on the paper: 45 // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved 46 // Data for SIMD 47 // 48 // Other ideas/concepts are from: 49 // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. 50 // 51 // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of 52 // Vectorizing Compilers. 53 // 54 //===----------------------------------------------------------------------===// 55 56 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 57 #include "LoopVectorizationPlanner.h" 58 #include "VPRecipeBuilder.h" 59 #include "VPlan.h" 60 #include "VPlanHCFGBuilder.h" 61 #include "VPlanPredicator.h" 62 #include "VPlanTransforms.h" 63 #include "llvm/ADT/APInt.h" 64 #include "llvm/ADT/ArrayRef.h" 65 #include "llvm/ADT/DenseMap.h" 66 #include "llvm/ADT/DenseMapInfo.h" 67 #include "llvm/ADT/Hashing.h" 68 #include "llvm/ADT/MapVector.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SmallPtrSet.h" 73 #include "llvm/ADT/SmallSet.h" 74 #include "llvm/ADT/SmallVector.h" 75 #include "llvm/ADT/Statistic.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/Twine.h" 78 #include "llvm/ADT/iterator_range.h" 79 #include "llvm/Analysis/AssumptionCache.h" 80 #include "llvm/Analysis/BasicAliasAnalysis.h" 81 #include "llvm/Analysis/BlockFrequencyInfo.h" 82 #include "llvm/Analysis/CFG.h" 83 #include "llvm/Analysis/CodeMetrics.h" 84 #include "llvm/Analysis/DemandedBits.h" 85 #include "llvm/Analysis/GlobalsModRef.h" 86 #include "llvm/Analysis/LoopAccessAnalysis.h" 87 #include "llvm/Analysis/LoopAnalysisManager.h" 88 #include "llvm/Analysis/LoopInfo.h" 89 #include "llvm/Analysis/LoopIterator.h" 90 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 91 #include "llvm/Analysis/ProfileSummaryInfo.h" 92 #include "llvm/Analysis/ScalarEvolution.h" 93 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 94 #include "llvm/Analysis/TargetLibraryInfo.h" 95 #include "llvm/Analysis/TargetTransformInfo.h" 96 #include "llvm/Analysis/VectorUtils.h" 97 #include "llvm/IR/Attributes.h" 98 #include "llvm/IR/BasicBlock.h" 99 #include "llvm/IR/CFG.h" 100 #include "llvm/IR/Constant.h" 101 #include "llvm/IR/Constants.h" 102 #include "llvm/IR/DataLayout.h" 103 #include "llvm/IR/DebugInfoMetadata.h" 104 #include "llvm/IR/DebugLoc.h" 105 #include "llvm/IR/DerivedTypes.h" 106 #include "llvm/IR/DiagnosticInfo.h" 107 #include "llvm/IR/Dominators.h" 108 #include "llvm/IR/Function.h" 109 #include "llvm/IR/IRBuilder.h" 110 #include "llvm/IR/InstrTypes.h" 111 #include "llvm/IR/Instruction.h" 112 #include "llvm/IR/Instructions.h" 113 #include "llvm/IR/IntrinsicInst.h" 114 #include "llvm/IR/Intrinsics.h" 115 #include "llvm/IR/LLVMContext.h" 116 #include "llvm/IR/Metadata.h" 117 #include "llvm/IR/Module.h" 118 #include "llvm/IR/Operator.h" 119 #include "llvm/IR/PatternMatch.h" 120 #include "llvm/IR/Type.h" 121 #include "llvm/IR/Use.h" 122 #include "llvm/IR/User.h" 123 #include "llvm/IR/Value.h" 124 #include "llvm/IR/ValueHandle.h" 125 #include "llvm/IR/Verifier.h" 126 #include "llvm/InitializePasses.h" 127 #include "llvm/Pass.h" 128 #include "llvm/Support/Casting.h" 129 #include "llvm/Support/CommandLine.h" 130 #include "llvm/Support/Compiler.h" 131 #include "llvm/Support/Debug.h" 132 #include "llvm/Support/ErrorHandling.h" 133 #include "llvm/Support/InstructionCost.h" 134 #include "llvm/Support/MathExtras.h" 135 #include "llvm/Support/raw_ostream.h" 136 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 137 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 138 #include "llvm/Transforms/Utils/LoopSimplify.h" 139 #include "llvm/Transforms/Utils/LoopUtils.h" 140 #include "llvm/Transforms/Utils/LoopVersioning.h" 141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 142 #include "llvm/Transforms/Utils/SizeOpts.h" 143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. If tail-folding fails, there are 210 // different fallback strategies depending on these values: 211 namespace PreferPredicateTy { 212 enum Option { 213 ScalarEpilogue = 0, 214 PredicateElseScalarEpilogue, 215 PredicateOrDontVectorize 216 }; 217 } // namespace PreferPredicateTy 218 219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue( 220 "prefer-predicate-over-epilogue", 221 cl::init(PreferPredicateTy::ScalarEpilogue), 222 cl::Hidden, 223 cl::desc("Tail-folding and predication preferences over creating a scalar " 224 "epilogue loop."), 225 cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, 226 "scalar-epilogue", 227 "Don't tail-predicate loops, create scalar epilogue"), 228 clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, 229 "predicate-else-scalar-epilogue", 230 "prefer tail-folding, create scalar epilogue if tail " 231 "folding fails."), 232 clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, 233 "predicate-dont-vectorize", 234 "prefers tail-folding, don't attempt vectorization if " 235 "tail-folding fails."))); 236 237 static cl::opt<bool> MaximizeBandwidth( 238 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 239 cl::desc("Maximize bandwidth when selecting vectorization factor which " 240 "will be determined by the smallest type in loop.")); 241 242 static cl::opt<bool> EnableInterleavedMemAccesses( 243 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 244 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 245 246 /// An interleave-group may need masking if it resides in a block that needs 247 /// predication, or in order to mask away gaps. 248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses( 249 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, 250 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); 251 252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold( 253 "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden, 254 cl::desc("We don't interleave loops with a estimated constant trip count " 255 "below this number")); 256 257 static cl::opt<unsigned> ForceTargetNumScalarRegs( 258 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 259 cl::desc("A flag that overrides the target's number of scalar registers.")); 260 261 static cl::opt<unsigned> ForceTargetNumVectorRegs( 262 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 263 cl::desc("A flag that overrides the target's number of vector registers.")); 264 265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 266 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 267 cl::desc("A flag that overrides the target's max interleave factor for " 268 "scalar loops.")); 269 270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 271 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 272 cl::desc("A flag that overrides the target's max interleave factor for " 273 "vectorized loops.")); 274 275 static cl::opt<unsigned> ForceTargetInstructionCost( 276 "force-target-instruction-cost", cl::init(0), cl::Hidden, 277 cl::desc("A flag that overrides the target's expected cost for " 278 "an instruction to a single constant value. Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 351 cl::opt<bool> EnableVPlanPredication( 352 "enable-vplan-predication", cl::init(false), cl::Hidden, 353 cl::desc("Enable VPlan-native vectorization path predicator with " 354 "support for outer loop vectorization.")); 355 356 // This flag enables the stress testing of the VPlan H-CFG construction in the 357 // VPlan-native vectorization path. It must be used in conjuction with 358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 359 // verification of the H-CFGs built. 360 static cl::opt<bool> VPlanBuildStressTest( 361 "vplan-build-stress-test", cl::init(false), cl::Hidden, 362 cl::desc( 363 "Build VPlan for every supported loop nest in the function and bail " 364 "out right after the build (stress test the VPlan H-CFG construction " 365 "in the VPlan-native vectorization path).")); 366 367 cl::opt<bool> llvm::EnableLoopInterleaving( 368 "interleave-loops", cl::init(true), cl::Hidden, 369 cl::desc("Enable loop interleaving in Loop vectorization passes")); 370 cl::opt<bool> llvm::EnableLoopVectorization( 371 "vectorize-loops", cl::init(true), cl::Hidden, 372 cl::desc("Run the Loop vectorization passes")); 373 374 cl::opt<bool> PrintVPlansInDotFormat( 375 "vplan-print-in-dot-format", cl::init(false), cl::Hidden, 376 cl::desc("Use dot format instead of plain text when dumping VPlans")); 377 378 /// A helper function that returns true if the given type is irregular. The 379 /// type is irregular if its allocated size doesn't equal the store size of an 380 /// element of the corresponding vector type. 381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) { 382 // Determine if an array of N elements of type Ty is "bitcast compatible" 383 // with a <N x Ty> vector. 384 // This is only true if there is no padding between the array elements. 385 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); 386 } 387 388 /// A helper function that returns the reciprocal of the block probability of 389 /// predicated blocks. If we return X, we are assuming the predicated block 390 /// will execute once for every X iterations of the loop header. 391 /// 392 /// TODO: We should use actual block probability here, if available. Currently, 393 /// we always assume predicated blocks have a 50% chance of executing. 394 static unsigned getReciprocalPredBlockProb() { return 2; } 395 396 /// A helper function that returns an integer or floating-point constant with 397 /// value C. 398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { 399 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) 400 : ConstantFP::get(Ty, C); 401 } 402 403 /// Returns "best known" trip count for the specified loop \p L as defined by 404 /// the following procedure: 405 /// 1) Returns exact trip count if it is known. 406 /// 2) Returns expected trip count according to profile data if any. 407 /// 3) Returns upper bound estimate if it is known. 408 /// 4) Returns None if all of the above failed. 409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) { 410 // Check if exact trip count is known. 411 if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L)) 412 return ExpectedTC; 413 414 // Check if there is an expected trip count available from profile data. 415 if (LoopVectorizeWithBlockFrequency) 416 if (auto EstimatedTC = getLoopEstimatedTripCount(L)) 417 return EstimatedTC; 418 419 // Check if upper bound estimate is known. 420 if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L)) 421 return ExpectedTC; 422 423 return None; 424 } 425 426 // Forward declare GeneratedRTChecks. 427 class GeneratedRTChecks; 428 429 namespace llvm { 430 431 /// InnerLoopVectorizer vectorizes loops which contain only one basic 432 /// block to a specified vectorization factor (VF). 433 /// This class performs the widening of scalars into vectors, or multiple 434 /// scalars. This class also implements the following features: 435 /// * It inserts an epilogue loop for handling loops that don't have iteration 436 /// counts that are known to be a multiple of the vectorization factor. 437 /// * It handles the code generation for reduction variables. 438 /// * Scalarization (implementation using scalars) of un-vectorizable 439 /// instructions. 440 /// InnerLoopVectorizer does not perform any vectorization-legality 441 /// checks, and relies on the caller to check for the different legality 442 /// aspects. The InnerLoopVectorizer relies on the 443 /// LoopVectorizationLegality class to provide information about the induction 444 /// and reduction variables that were found to a given vectorization factor. 445 class InnerLoopVectorizer { 446 public: 447 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 448 LoopInfo *LI, DominatorTree *DT, 449 const TargetLibraryInfo *TLI, 450 const TargetTransformInfo *TTI, AssumptionCache *AC, 451 OptimizationRemarkEmitter *ORE, ElementCount VecWidth, 452 unsigned UnrollFactor, LoopVectorizationLegality *LVL, 453 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, 454 ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks) 455 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 456 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 457 Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI), 458 PSI(PSI), RTChecks(RTChecks) { 459 // Query this against the original loop and save it here because the profile 460 // of the original loop header may change as the transformation happens. 461 OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize( 462 OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass); 463 } 464 465 virtual ~InnerLoopVectorizer() = default; 466 467 /// Create a new empty loop that will contain vectorized instructions later 468 /// on, while the old loop will be used as the scalar remainder. Control flow 469 /// is generated around the vectorized (and scalar epilogue) loops consisting 470 /// of various checks and bypasses. Return the pre-header block of the new 471 /// loop. 472 /// In the case of epilogue vectorization, this function is overriden to 473 /// handle the more complex control flow around the loops. 474 virtual BasicBlock *createVectorizedLoopSkeleton(); 475 476 /// Widen a single call instruction within the innermost loop. 477 void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands, 478 VPTransformState &State); 479 480 /// Fix the vectorized code, taking care of header phi's, live-outs, and more. 481 void fixVectorizedLoop(VPTransformState &State); 482 483 // Return true if any runtime check is added. 484 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 485 486 /// A type for vectorized values in the new loop. Each value from the 487 /// original loop, when vectorized, is represented by UF vector values in the 488 /// new unrolled loop, where UF is the unroll factor. 489 using VectorParts = SmallVector<Value *, 2>; 490 491 /// Vectorize a single first-order recurrence or pointer induction PHINode in 492 /// a block. This method handles the induction variable canonicalization. It 493 /// supports both VF = 1 for unrolled loops and arbitrary length vectors. 494 void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR, 495 VPTransformState &State); 496 497 /// A helper function to scalarize a single Instruction in the innermost loop. 498 /// Generates a sequence of scalar instances for each lane between \p MinLane 499 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 500 /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p 501 /// Instr's operands. 502 void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe, 503 const VPIteration &Instance, bool IfPredicateInstr, 504 VPTransformState &State); 505 506 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 507 /// is provided, the integer induction variable will first be truncated to 508 /// the corresponding type. 509 void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc, 510 VPValue *Def, VPValue *CastDef, 511 VPTransformState &State); 512 513 /// Construct the vector value of a scalarized value \p V one lane at a time. 514 void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance, 515 VPTransformState &State); 516 517 /// Try to vectorize interleaved access group \p Group with the base address 518 /// given in \p Addr, optionally masking the vector operations if \p 519 /// BlockInMask is non-null. Use \p State to translate given VPValues to IR 520 /// values in the vectorized loop. 521 void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group, 522 ArrayRef<VPValue *> VPDefs, 523 VPTransformState &State, VPValue *Addr, 524 ArrayRef<VPValue *> StoredValues, 525 VPValue *BlockInMask = nullptr); 526 527 /// Set the debug location in the builder \p Ptr using the debug location in 528 /// \p V. If \p Ptr is None then it uses the class member's Builder. 529 void setDebugLocFromInst(const Value *V, 530 Optional<IRBuilder<> *> CustomBuilder = None); 531 532 /// Fix the non-induction PHIs in the OrigPHIsToFix vector. 533 void fixNonInductionPHIs(VPTransformState &State); 534 535 /// Returns true if the reordering of FP operations is not allowed, but we are 536 /// able to vectorize with strict in-order reductions for the given RdxDesc. 537 bool useOrderedReductions(RecurrenceDescriptor &RdxDesc); 538 539 /// Create a broadcast instruction. This method generates a broadcast 540 /// instruction (shuffle) for loop invariant values and for the induction 541 /// value. If this is the induction variable then we extend it to N, N+1, ... 542 /// this is needed because each iteration in the loop corresponds to a SIMD 543 /// element. 544 virtual Value *getBroadcastInstrs(Value *V); 545 546 /// Add metadata from one instruction to another. 547 /// 548 /// This includes both the original MDs from \p From and additional ones (\see 549 /// addNewMetadata). Use this for *newly created* instructions in the vector 550 /// loop. 551 void addMetadata(Instruction *To, Instruction *From); 552 553 /// Similar to the previous function but it adds the metadata to a 554 /// vector of instructions. 555 void addMetadata(ArrayRef<Value *> To, Instruction *From); 556 557 protected: 558 friend class LoopVectorizationPlanner; 559 560 /// A small list of PHINodes. 561 using PhiVector = SmallVector<PHINode *, 4>; 562 563 /// A type for scalarized values in the new loop. Each value from the 564 /// original loop, when scalarized, is represented by UF x VF scalar values 565 /// in the new unrolled loop, where UF is the unroll factor and VF is the 566 /// vectorization factor. 567 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 568 569 /// Set up the values of the IVs correctly when exiting the vector loop. 570 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 571 Value *CountRoundDown, Value *EndValue, 572 BasicBlock *MiddleBlock); 573 574 /// Create a new induction variable inside L. 575 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 576 Value *Step, Instruction *DL); 577 578 /// Handle all cross-iteration phis in the header. 579 void fixCrossIterationPHIs(VPTransformState &State); 580 581 /// Create the exit value of first order recurrences in the middle block and 582 /// update their users. 583 void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State); 584 585 /// Create code for the loop exit value of the reduction. 586 void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State); 587 588 /// Clear NSW/NUW flags from reduction instructions if necessary. 589 void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 590 VPTransformState &State); 591 592 /// Fixup the LCSSA phi nodes in the unique exit block. This simply 593 /// means we need to add the appropriate incoming value from the middle 594 /// block as exiting edges from the scalar epilogue loop (if present) are 595 /// already in place, and we exit the vector loop exclusively to the middle 596 /// block. 597 void fixLCSSAPHIs(VPTransformState &State); 598 599 /// Iteratively sink the scalarized operands of a predicated instruction into 600 /// the block that was created for it. 601 void sinkScalarOperands(Instruction *PredInst); 602 603 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 604 /// represented as. 605 void truncateToMinimalBitwidths(VPTransformState &State); 606 607 /// This function adds 608 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 609 /// to each vector element of Val. The sequence starts at StartIndex. 610 /// \p Opcode is relevant for FP induction variable. 611 virtual Value * 612 getStepVector(Value *Val, Value *StartIdx, Value *Step, 613 Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd); 614 615 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 616 /// variable on which to base the steps, \p Step is the size of the step, and 617 /// \p EntryVal is the value from the original loop that maps to the steps. 618 /// Note that \p EntryVal doesn't have to be an induction variable - it 619 /// can also be a truncate instruction. 620 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 621 const InductionDescriptor &ID, VPValue *Def, 622 VPValue *CastDef, VPTransformState &State); 623 624 /// Create a vector induction phi node based on an existing scalar one. \p 625 /// EntryVal is the value from the original loop that maps to the vector phi 626 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 627 /// truncate instruction, instead of widening the original IV, we widen a 628 /// version of the IV truncated to \p EntryVal's type. 629 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 630 Value *Step, Value *Start, 631 Instruction *EntryVal, VPValue *Def, 632 VPValue *CastDef, 633 VPTransformState &State); 634 635 /// Returns true if an instruction \p I should be scalarized instead of 636 /// vectorized for the chosen vectorization factor. 637 bool shouldScalarizeInstruction(Instruction *I) const; 638 639 /// Returns true if we should generate a scalar version of \p IV. 640 bool needsScalarInduction(Instruction *IV) const; 641 642 /// If there is a cast involved in the induction variable \p ID, which should 643 /// be ignored in the vectorized loop body, this function records the 644 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 645 /// cast. We had already proved that the casted Phi is equal to the uncasted 646 /// Phi in the vectorized loop (under a runtime guard), and therefore 647 /// there is no need to vectorize the cast - the same value can be used in the 648 /// vector loop for both the Phi and the cast. 649 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 650 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 651 /// 652 /// \p EntryVal is the value from the original loop that maps to the vector 653 /// phi node and is used to distinguish what is the IV currently being 654 /// processed - original one (if \p EntryVal is a phi corresponding to the 655 /// original IV) or the "newly-created" one based on the proof mentioned above 656 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 657 /// latter case \p EntryVal is a TruncInst and we must not record anything for 658 /// that IV, but it's error-prone to expect callers of this routine to care 659 /// about that, hence this explicit parameter. 660 void recordVectorLoopValueForInductionCast( 661 const InductionDescriptor &ID, const Instruction *EntryVal, 662 Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State, 663 unsigned Part, unsigned Lane = UINT_MAX); 664 665 /// Generate a shuffle sequence that will reverse the vector Vec. 666 virtual Value *reverseVector(Value *Vec); 667 668 /// Returns (and creates if needed) the original loop trip count. 669 Value *getOrCreateTripCount(Loop *NewLoop); 670 671 /// Returns (and creates if needed) the trip count of the widened loop. 672 Value *getOrCreateVectorTripCount(Loop *NewLoop); 673 674 /// Returns a bitcasted value to the requested vector type. 675 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 676 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 677 const DataLayout &DL); 678 679 /// Emit a bypass check to see if the vector trip count is zero, including if 680 /// it overflows. 681 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 682 683 /// Emit a bypass check to see if all of the SCEV assumptions we've 684 /// had to make are correct. Returns the block containing the checks or 685 /// nullptr if no checks have been added. 686 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass); 687 688 /// Emit bypass checks to check any memory assumptions we may have made. 689 /// Returns the block containing the checks or nullptr if no checks have been 690 /// added. 691 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 692 693 /// Compute the transformed value of Index at offset StartValue using step 694 /// StepValue. 695 /// For integer induction, returns StartValue + Index * StepValue. 696 /// For pointer induction, returns StartValue[Index * StepValue]. 697 /// FIXME: The newly created binary instructions should contain nsw/nuw 698 /// flags, which can be found from the original scalar operations. 699 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 700 const DataLayout &DL, 701 const InductionDescriptor &ID) const; 702 703 /// Emit basic blocks (prefixed with \p Prefix) for the iteration check, 704 /// vector loop preheader, middle block and scalar preheader. Also 705 /// allocate a loop object for the new vector loop and return it. 706 Loop *createVectorLoopSkeleton(StringRef Prefix); 707 708 /// Create new phi nodes for the induction variables to resume iteration count 709 /// in the scalar epilogue, from where the vectorized loop left off (given by 710 /// \p VectorTripCount). 711 /// In cases where the loop skeleton is more complicated (eg. epilogue 712 /// vectorization) and the resume values can come from an additional bypass 713 /// block, the \p AdditionalBypass pair provides information about the bypass 714 /// block and the end value on the edge from bypass to this loop. 715 void createInductionResumeValues( 716 Loop *L, Value *VectorTripCount, 717 std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr}); 718 719 /// Complete the loop skeleton by adding debug MDs, creating appropriate 720 /// conditional branches in the middle block, preparing the builder and 721 /// running the verifier. Take in the vector loop \p L as argument, and return 722 /// the preheader of the completed vector loop. 723 BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID); 724 725 /// Add additional metadata to \p To that was not present on \p Orig. 726 /// 727 /// Currently this is used to add the noalias annotations based on the 728 /// inserted memchecks. Use this for instructions that are *cloned* into the 729 /// vector loop. 730 void addNewMetadata(Instruction *To, const Instruction *Orig); 731 732 /// Collect poison-generating recipes that may generate a poison value that is 733 /// used after vectorization, even when their operands are not poison. Those 734 /// recipes meet the following conditions: 735 /// * Contribute to the address computation of a recipe generating a widen 736 /// memory load/store (VPWidenMemoryInstructionRecipe or 737 /// VPInterleaveRecipe). 738 /// * Such a widen memory load/store has at least one underlying Instruction 739 /// that is in a basic block that needs predication and after vectorization 740 /// the generated instruction won't be predicated. 741 void collectPoisonGeneratingRecipes(VPTransformState &State); 742 743 /// Allow subclasses to override and print debug traces before/after vplan 744 /// execution, when trace information is requested. 745 virtual void printDebugTracesAtStart(){}; 746 virtual void printDebugTracesAtEnd(){}; 747 748 /// The original loop. 749 Loop *OrigLoop; 750 751 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies 752 /// dynamic knowledge to simplify SCEV expressions and converts them to a 753 /// more usable form. 754 PredicatedScalarEvolution &PSE; 755 756 /// Loop Info. 757 LoopInfo *LI; 758 759 /// Dominator Tree. 760 DominatorTree *DT; 761 762 /// Alias Analysis. 763 AAResults *AA; 764 765 /// Target Library Info. 766 const TargetLibraryInfo *TLI; 767 768 /// Target Transform Info. 769 const TargetTransformInfo *TTI; 770 771 /// Assumption Cache. 772 AssumptionCache *AC; 773 774 /// Interface to emit optimization remarks. 775 OptimizationRemarkEmitter *ORE; 776 777 /// LoopVersioning. It's only set up (non-null) if memchecks were 778 /// used. 779 /// 780 /// This is currently only used to add no-alias metadata based on the 781 /// memchecks. The actually versioning is performed manually. 782 std::unique_ptr<LoopVersioning> LVer; 783 784 /// The vectorization SIMD factor to use. Each vector will have this many 785 /// vector elements. 786 ElementCount VF; 787 788 /// The vectorization unroll factor to use. Each scalar is vectorized to this 789 /// many different vector instructions. 790 unsigned UF; 791 792 /// The builder that we use 793 IRBuilder<> Builder; 794 795 // --- Vectorization state --- 796 797 /// The vector-loop preheader. 798 BasicBlock *LoopVectorPreHeader; 799 800 /// The scalar-loop preheader. 801 BasicBlock *LoopScalarPreHeader; 802 803 /// Middle Block between the vector and the scalar. 804 BasicBlock *LoopMiddleBlock; 805 806 /// The unique ExitBlock of the scalar loop if one exists. Note that 807 /// there can be multiple exiting edges reaching this block. 808 BasicBlock *LoopExitBlock; 809 810 /// The vector loop body. 811 BasicBlock *LoopVectorBody; 812 813 /// The scalar loop body. 814 BasicBlock *LoopScalarBody; 815 816 /// A list of all bypass blocks. The first block is the entry of the loop. 817 SmallVector<BasicBlock *, 4> LoopBypassBlocks; 818 819 /// The new Induction variable which was added to the new block. 820 PHINode *Induction = nullptr; 821 822 /// The induction variable of the old basic block. 823 PHINode *OldInduction = nullptr; 824 825 /// Store instructions that were predicated. 826 SmallVector<Instruction *, 4> PredicatedInstructions; 827 828 /// Trip count of the original loop. 829 Value *TripCount = nullptr; 830 831 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) 832 Value *VectorTripCount = nullptr; 833 834 /// The legality analysis. 835 LoopVectorizationLegality *Legal; 836 837 /// The profitablity analysis. 838 LoopVectorizationCostModel *Cost; 839 840 // Record whether runtime checks are added. 841 bool AddedSafetyChecks = false; 842 843 // Holds the end values for each induction variable. We save the end values 844 // so we can later fix-up the external users of the induction variables. 845 DenseMap<PHINode *, Value *> IVEndValues; 846 847 // Vector of original scalar PHIs whose corresponding widened PHIs need to be 848 // fixed up at the end of vector code generation. 849 SmallVector<PHINode *, 8> OrigPHIsToFix; 850 851 /// BFI and PSI are used to check for profile guided size optimizations. 852 BlockFrequencyInfo *BFI; 853 ProfileSummaryInfo *PSI; 854 855 // Whether this loop should be optimized for size based on profile guided size 856 // optimizatios. 857 bool OptForSizeBasedOnProfile; 858 859 /// Structure to hold information about generated runtime checks, responsible 860 /// for cleaning the checks, if vectorization turns out unprofitable. 861 GeneratedRTChecks &RTChecks; 862 }; 863 864 class InnerLoopUnroller : public InnerLoopVectorizer { 865 public: 866 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 867 LoopInfo *LI, DominatorTree *DT, 868 const TargetLibraryInfo *TLI, 869 const TargetTransformInfo *TTI, AssumptionCache *AC, 870 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, 871 LoopVectorizationLegality *LVL, 872 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, 873 ProfileSummaryInfo *PSI, GeneratedRTChecks &Check) 874 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 875 ElementCount::getFixed(1), UnrollFactor, LVL, CM, 876 BFI, PSI, Check) {} 877 878 private: 879 Value *getBroadcastInstrs(Value *V) override; 880 Value *getStepVector( 881 Value *Val, Value *StartIdx, Value *Step, 882 Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override; 883 Value *reverseVector(Value *Vec) override; 884 }; 885 886 /// Encapsulate information regarding vectorization of a loop and its epilogue. 887 /// This information is meant to be updated and used across two stages of 888 /// epilogue vectorization. 889 struct EpilogueLoopVectorizationInfo { 890 ElementCount MainLoopVF = ElementCount::getFixed(0); 891 unsigned MainLoopUF = 0; 892 ElementCount EpilogueVF = ElementCount::getFixed(0); 893 unsigned EpilogueUF = 0; 894 BasicBlock *MainLoopIterationCountCheck = nullptr; 895 BasicBlock *EpilogueIterationCountCheck = nullptr; 896 BasicBlock *SCEVSafetyCheck = nullptr; 897 BasicBlock *MemSafetyCheck = nullptr; 898 Value *TripCount = nullptr; 899 Value *VectorTripCount = nullptr; 900 901 EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, 902 ElementCount EVF, unsigned EUF) 903 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) { 904 assert(EUF == 1 && 905 "A high UF for the epilogue loop is likely not beneficial."); 906 } 907 }; 908 909 /// An extension of the inner loop vectorizer that creates a skeleton for a 910 /// vectorized loop that has its epilogue (residual) also vectorized. 911 /// The idea is to run the vplan on a given loop twice, firstly to setup the 912 /// skeleton and vectorize the main loop, and secondly to complete the skeleton 913 /// from the first step and vectorize the epilogue. This is achieved by 914 /// deriving two concrete strategy classes from this base class and invoking 915 /// them in succession from the loop vectorizer planner. 916 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer { 917 public: 918 InnerLoopAndEpilogueVectorizer( 919 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, 920 DominatorTree *DT, const TargetLibraryInfo *TLI, 921 const TargetTransformInfo *TTI, AssumptionCache *AC, 922 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, 923 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, 924 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, 925 GeneratedRTChecks &Checks) 926 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 927 EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI, 928 Checks), 929 EPI(EPI) {} 930 931 // Override this function to handle the more complex control flow around the 932 // three loops. 933 BasicBlock *createVectorizedLoopSkeleton() final override { 934 return createEpilogueVectorizedLoopSkeleton(); 935 } 936 937 /// The interface for creating a vectorized skeleton using one of two 938 /// different strategies, each corresponding to one execution of the vplan 939 /// as described above. 940 virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0; 941 942 /// Holds and updates state information required to vectorize the main loop 943 /// and its epilogue in two separate passes. This setup helps us avoid 944 /// regenerating and recomputing runtime safety checks. It also helps us to 945 /// shorten the iteration-count-check path length for the cases where the 946 /// iteration count of the loop is so small that the main vector loop is 947 /// completely skipped. 948 EpilogueLoopVectorizationInfo &EPI; 949 }; 950 951 /// A specialized derived class of inner loop vectorizer that performs 952 /// vectorization of *main* loops in the process of vectorizing loops and their 953 /// epilogues. 954 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer { 955 public: 956 EpilogueVectorizerMainLoop( 957 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, 958 DominatorTree *DT, const TargetLibraryInfo *TLI, 959 const TargetTransformInfo *TTI, AssumptionCache *AC, 960 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, 961 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, 962 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, 963 GeneratedRTChecks &Check) 964 : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 965 EPI, LVL, CM, BFI, PSI, Check) {} 966 /// Implements the interface for creating a vectorized skeleton using the 967 /// *main loop* strategy (ie the first pass of vplan execution). 968 BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; 969 970 protected: 971 /// Emits an iteration count bypass check once for the main loop (when \p 972 /// ForEpilogue is false) and once for the epilogue loop (when \p 973 /// ForEpilogue is true). 974 BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass, 975 bool ForEpilogue); 976 void printDebugTracesAtStart() override; 977 void printDebugTracesAtEnd() override; 978 }; 979 980 // A specialized derived class of inner loop vectorizer that performs 981 // vectorization of *epilogue* loops in the process of vectorizing loops and 982 // their epilogues. 983 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer { 984 public: 985 EpilogueVectorizerEpilogueLoop( 986 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, 987 DominatorTree *DT, const TargetLibraryInfo *TLI, 988 const TargetTransformInfo *TTI, AssumptionCache *AC, 989 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, 990 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, 991 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, 992 GeneratedRTChecks &Checks) 993 : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 994 EPI, LVL, CM, BFI, PSI, Checks) {} 995 /// Implements the interface for creating a vectorized skeleton using the 996 /// *epilogue loop* strategy (ie the second pass of vplan execution). 997 BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; 998 999 protected: 1000 /// Emits an iteration count bypass check after the main vector loop has 1001 /// finished to see if there are any iterations left to execute by either 1002 /// the vector epilogue or the scalar epilogue. 1003 BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L, 1004 BasicBlock *Bypass, 1005 BasicBlock *Insert); 1006 void printDebugTracesAtStart() override; 1007 void printDebugTracesAtEnd() override; 1008 }; 1009 } // end namespace llvm 1010 1011 /// Look for a meaningful debug location on the instruction or it's 1012 /// operands. 1013 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { 1014 if (!I) 1015 return I; 1016 1017 DebugLoc Empty; 1018 if (I->getDebugLoc() != Empty) 1019 return I; 1020 1021 for (Use &Op : I->operands()) { 1022 if (Instruction *OpInst = dyn_cast<Instruction>(Op)) 1023 if (OpInst->getDebugLoc() != Empty) 1024 return OpInst; 1025 } 1026 1027 return I; 1028 } 1029 1030 void InnerLoopVectorizer::setDebugLocFromInst( 1031 const Value *V, Optional<IRBuilder<> *> CustomBuilder) { 1032 IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder; 1033 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) { 1034 const DILocation *DIL = Inst->getDebugLoc(); 1035 1036 // When a FSDiscriminator is enabled, we don't need to add the multiply 1037 // factors to the discriminators. 1038 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && 1039 !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) { 1040 // FIXME: For scalable vectors, assume vscale=1. 1041 auto NewDIL = 1042 DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue()); 1043 if (NewDIL) 1044 B->SetCurrentDebugLocation(NewDIL.getValue()); 1045 else 1046 LLVM_DEBUG(dbgs() 1047 << "Failed to create new discriminator: " 1048 << DIL->getFilename() << " Line: " << DIL->getLine()); 1049 } else 1050 B->SetCurrentDebugLocation(DIL); 1051 } else 1052 B->SetCurrentDebugLocation(DebugLoc()); 1053 } 1054 1055 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I 1056 /// is passed, the message relates to that particular instruction. 1057 #ifndef NDEBUG 1058 static void debugVectorizationMessage(const StringRef Prefix, 1059 const StringRef DebugMsg, 1060 Instruction *I) { 1061 dbgs() << "LV: " << Prefix << DebugMsg; 1062 if (I != nullptr) 1063 dbgs() << " " << *I; 1064 else 1065 dbgs() << '.'; 1066 dbgs() << '\n'; 1067 } 1068 #endif 1069 1070 /// Create an analysis remark that explains why vectorization failed 1071 /// 1072 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p 1073 /// RemarkName is the identifier for the remark. If \p I is passed it is an 1074 /// instruction that prevents vectorization. Otherwise \p TheLoop is used for 1075 /// the location of the remark. \return the remark object that can be 1076 /// streamed to. 1077 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, 1078 StringRef RemarkName, Loop *TheLoop, Instruction *I) { 1079 Value *CodeRegion = TheLoop->getHeader(); 1080 DebugLoc DL = TheLoop->getStartLoc(); 1081 1082 if (I) { 1083 CodeRegion = I->getParent(); 1084 // If there is no debug location attached to the instruction, revert back to 1085 // using the loop's. 1086 if (I->getDebugLoc()) 1087 DL = I->getDebugLoc(); 1088 } 1089 1090 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion); 1091 } 1092 1093 /// Return a value for Step multiplied by VF. 1094 static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF, 1095 int64_t Step) { 1096 assert(Ty->isIntegerTy() && "Expected an integer step"); 1097 Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue()); 1098 return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal; 1099 } 1100 1101 namespace llvm { 1102 1103 /// Return the runtime value for VF. 1104 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) { 1105 Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue()); 1106 return VF.isScalable() ? B.CreateVScale(EC) : EC; 1107 } 1108 1109 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) { 1110 assert(FTy->isFloatingPointTy() && "Expected floating point type!"); 1111 Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits()); 1112 Value *RuntimeVF = getRuntimeVF(B, IntTy, VF); 1113 return B.CreateUIToFP(RuntimeVF, FTy); 1114 } 1115 1116 void reportVectorizationFailure(const StringRef DebugMsg, 1117 const StringRef OREMsg, const StringRef ORETag, 1118 OptimizationRemarkEmitter *ORE, Loop *TheLoop, 1119 Instruction *I) { 1120 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I)); 1121 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); 1122 ORE->emit( 1123 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) 1124 << "loop not vectorized: " << OREMsg); 1125 } 1126 1127 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, 1128 OptimizationRemarkEmitter *ORE, Loop *TheLoop, 1129 Instruction *I) { 1130 LLVM_DEBUG(debugVectorizationMessage("", Msg, I)); 1131 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); 1132 ORE->emit( 1133 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) 1134 << Msg); 1135 } 1136 1137 } // end namespace llvm 1138 1139 #ifndef NDEBUG 1140 /// \return string containing a file name and a line # for the given loop. 1141 static std::string getDebugLocString(const Loop *L) { 1142 std::string Result; 1143 if (L) { 1144 raw_string_ostream OS(Result); 1145 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 1146 LoopDbgLoc.print(OS); 1147 else 1148 // Just print the module name. 1149 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 1150 OS.flush(); 1151 } 1152 return Result; 1153 } 1154 #endif 1155 1156 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 1157 const Instruction *Orig) { 1158 // If the loop was versioned with memchecks, add the corresponding no-alias 1159 // metadata. 1160 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 1161 LVer->annotateInstWithNoAlias(To, Orig); 1162 } 1163 1164 void InnerLoopVectorizer::collectPoisonGeneratingRecipes( 1165 VPTransformState &State) { 1166 1167 // Collect recipes in the backward slice of `Root` that may generate a poison 1168 // value that is used after vectorization. 1169 SmallPtrSet<VPRecipeBase *, 16> Visited; 1170 auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) { 1171 SmallVector<VPRecipeBase *, 16> Worklist; 1172 Worklist.push_back(Root); 1173 1174 // Traverse the backward slice of Root through its use-def chain. 1175 while (!Worklist.empty()) { 1176 VPRecipeBase *CurRec = Worklist.back(); 1177 Worklist.pop_back(); 1178 1179 if (!Visited.insert(CurRec).second) 1180 continue; 1181 1182 // Prune search if we find another recipe generating a widen memory 1183 // instruction. Widen memory instructions involved in address computation 1184 // will lead to gather/scatter instructions, which don't need to be 1185 // handled. 1186 if (isa<VPWidenMemoryInstructionRecipe>(CurRec) || 1187 isa<VPInterleaveRecipe>(CurRec)) 1188 continue; 1189 1190 // This recipe contributes to the address computation of a widen 1191 // load/store. Collect recipe if its underlying instruction has 1192 // poison-generating flags. 1193 Instruction *Instr = CurRec->getUnderlyingInstr(); 1194 if (Instr && Instr->hasPoisonGeneratingFlags()) 1195 State.MayGeneratePoisonRecipes.insert(CurRec); 1196 1197 // Add new definitions to the worklist. 1198 for (VPValue *operand : CurRec->operands()) 1199 if (VPDef *OpDef = operand->getDef()) 1200 Worklist.push_back(cast<VPRecipeBase>(OpDef)); 1201 } 1202 }); 1203 1204 // Traverse all the recipes in the VPlan and collect the poison-generating 1205 // recipes in the backward slice starting at the address of a VPWidenRecipe or 1206 // VPInterleaveRecipe. 1207 auto Iter = depth_first( 1208 VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry())); 1209 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) { 1210 for (VPRecipeBase &Recipe : *VPBB) { 1211 if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) { 1212 Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr(); 1213 VPDef *AddrDef = WidenRec->getAddr()->getDef(); 1214 if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr && 1215 Legal->blockNeedsPredication(UnderlyingInstr->getParent())) 1216 collectPoisonGeneratingInstrsInBackwardSlice( 1217 cast<VPRecipeBase>(AddrDef)); 1218 } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) { 1219 VPDef *AddrDef = InterleaveRec->getAddr()->getDef(); 1220 if (AddrDef) { 1221 // Check if any member of the interleave group needs predication. 1222 const InterleaveGroup<Instruction> *InterGroup = 1223 InterleaveRec->getInterleaveGroup(); 1224 bool NeedPredication = false; 1225 for (int I = 0, NumMembers = InterGroup->getNumMembers(); 1226 I < NumMembers; ++I) { 1227 Instruction *Member = InterGroup->getMember(I); 1228 if (Member) 1229 NeedPredication |= 1230 Legal->blockNeedsPredication(Member->getParent()); 1231 } 1232 1233 if (NeedPredication) 1234 collectPoisonGeneratingInstrsInBackwardSlice( 1235 cast<VPRecipeBase>(AddrDef)); 1236 } 1237 } 1238 } 1239 } 1240 } 1241 1242 void InnerLoopVectorizer::addMetadata(Instruction *To, 1243 Instruction *From) { 1244 propagateMetadata(To, From); 1245 addNewMetadata(To, From); 1246 } 1247 1248 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 1249 Instruction *From) { 1250 for (Value *V : To) { 1251 if (Instruction *I = dyn_cast<Instruction>(V)) 1252 addMetadata(I, From); 1253 } 1254 } 1255 1256 namespace llvm { 1257 1258 // Loop vectorization cost-model hints how the scalar epilogue loop should be 1259 // lowered. 1260 enum ScalarEpilogueLowering { 1261 1262 // The default: allowing scalar epilogues. 1263 CM_ScalarEpilogueAllowed, 1264 1265 // Vectorization with OptForSize: don't allow epilogues. 1266 CM_ScalarEpilogueNotAllowedOptSize, 1267 1268 // A special case of vectorisation with OptForSize: loops with a very small 1269 // trip count are considered for vectorization under OptForSize, thereby 1270 // making sure the cost of their loop body is dominant, free of runtime 1271 // guards and scalar iteration overheads. 1272 CM_ScalarEpilogueNotAllowedLowTripLoop, 1273 1274 // Loop hint predicate indicating an epilogue is undesired. 1275 CM_ScalarEpilogueNotNeededUsePredicate, 1276 1277 // Directive indicating we must either tail fold or not vectorize 1278 CM_ScalarEpilogueNotAllowedUsePredicate 1279 }; 1280 1281 /// ElementCountComparator creates a total ordering for ElementCount 1282 /// for the purposes of using it in a set structure. 1283 struct ElementCountComparator { 1284 bool operator()(const ElementCount &LHS, const ElementCount &RHS) const { 1285 return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) < 1286 std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue()); 1287 } 1288 }; 1289 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>; 1290 1291 /// LoopVectorizationCostModel - estimates the expected speedups due to 1292 /// vectorization. 1293 /// In many cases vectorization is not profitable. This can happen because of 1294 /// a number of reasons. In this class we mainly attempt to predict the 1295 /// expected speedup/slowdowns due to the supported instruction set. We use the 1296 /// TargetTransformInfo to query the different backends for the cost of 1297 /// different operations. 1298 class LoopVectorizationCostModel { 1299 public: 1300 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, 1301 PredicatedScalarEvolution &PSE, LoopInfo *LI, 1302 LoopVectorizationLegality *Legal, 1303 const TargetTransformInfo &TTI, 1304 const TargetLibraryInfo *TLI, DemandedBits *DB, 1305 AssumptionCache *AC, 1306 OptimizationRemarkEmitter *ORE, const Function *F, 1307 const LoopVectorizeHints *Hints, 1308 InterleavedAccessInfo &IAI) 1309 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), 1310 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F), 1311 Hints(Hints), InterleaveInfo(IAI) {} 1312 1313 /// \return An upper bound for the vectorization factors (both fixed and 1314 /// scalable). If the factors are 0, vectorization and interleaving should be 1315 /// avoided up front. 1316 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC); 1317 1318 /// \return True if runtime checks are required for vectorization, and false 1319 /// otherwise. 1320 bool runtimeChecksRequired(); 1321 1322 /// \return The most profitable vectorization factor and the cost of that VF. 1323 /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO 1324 /// then this vectorization factor will be selected if vectorization is 1325 /// possible. 1326 VectorizationFactor 1327 selectVectorizationFactor(const ElementCountSet &CandidateVFs); 1328 1329 VectorizationFactor 1330 selectEpilogueVectorizationFactor(const ElementCount MaxVF, 1331 const LoopVectorizationPlanner &LVP); 1332 1333 /// Setup cost-based decisions for user vectorization factor. 1334 /// \return true if the UserVF is a feasible VF to be chosen. 1335 bool selectUserVectorizationFactor(ElementCount UserVF) { 1336 collectUniformsAndScalars(UserVF); 1337 collectInstsToScalarize(UserVF); 1338 return expectedCost(UserVF).first.isValid(); 1339 } 1340 1341 /// \return The size (in bits) of the smallest and widest types in the code 1342 /// that needs to be vectorized. We ignore values that remain scalar such as 1343 /// 64 bit loop indices. 1344 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1345 1346 /// \return The desired interleave count. 1347 /// If interleave count has been specified by metadata it will be returned. 1348 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1349 /// are the selected vectorization factor and the cost of the selected VF. 1350 unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); 1351 1352 /// Memory access instruction may be vectorized in more than one way. 1353 /// Form of instruction after vectorization depends on cost. 1354 /// This function takes cost-based decisions for Load/Store instructions 1355 /// and collects them in a map. This decisions map is used for building 1356 /// the lists of loop-uniform and loop-scalar instructions. 1357 /// The calculated cost is saved with widening decision in order to 1358 /// avoid redundant calculations. 1359 void setCostBasedWideningDecision(ElementCount VF); 1360 1361 /// A struct that represents some properties of the register usage 1362 /// of a loop. 1363 struct RegisterUsage { 1364 /// Holds the number of loop invariant values that are used in the loop. 1365 /// The key is ClassID of target-provided register class. 1366 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1367 /// Holds the maximum number of concurrent live intervals in the loop. 1368 /// The key is ClassID of target-provided register class. 1369 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1370 }; 1371 1372 /// \return Returns information about the register usages of the loop for the 1373 /// given vectorization factors. 1374 SmallVector<RegisterUsage, 8> 1375 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1376 1377 /// Collect values we want to ignore in the cost model. 1378 void collectValuesToIgnore(); 1379 1380 /// Collect all element types in the loop for which widening is needed. 1381 void collectElementTypesForWidening(); 1382 1383 /// Split reductions into those that happen in the loop, and those that happen 1384 /// outside. In loop reductions are collected into InLoopReductionChains. 1385 void collectInLoopReductions(); 1386 1387 /// Returns true if we should use strict in-order reductions for the given 1388 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1389 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1390 /// of FP operations. 1391 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1392 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1393 } 1394 1395 /// \returns The smallest bitwidth each instruction can be represented with. 1396 /// The vector equivalents of these instructions should be truncated to this 1397 /// type. 1398 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1399 return MinBWs; 1400 } 1401 1402 /// \returns True if it is more profitable to scalarize instruction \p I for 1403 /// vectorization factor \p VF. 1404 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1405 assert(VF.isVector() && 1406 "Profitable to scalarize relevant only for VF > 1."); 1407 1408 // Cost model is not run in the VPlan-native path - return conservative 1409 // result until this changes. 1410 if (EnableVPlanNativePath) 1411 return false; 1412 1413 auto Scalars = InstsToScalarize.find(VF); 1414 assert(Scalars != InstsToScalarize.end() && 1415 "VF not yet analyzed for scalarization profitability"); 1416 return Scalars->second.find(I) != Scalars->second.end(); 1417 } 1418 1419 /// Returns true if \p I is known to be uniform after vectorization. 1420 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1421 if (VF.isScalar()) 1422 return true; 1423 1424 // Cost model is not run in the VPlan-native path - return conservative 1425 // result until this changes. 1426 if (EnableVPlanNativePath) 1427 return false; 1428 1429 auto UniformsPerVF = Uniforms.find(VF); 1430 assert(UniformsPerVF != Uniforms.end() && 1431 "VF not yet analyzed for uniformity"); 1432 return UniformsPerVF->second.count(I); 1433 } 1434 1435 /// Returns true if \p I is known to be scalar after vectorization. 1436 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1437 if (VF.isScalar()) 1438 return true; 1439 1440 // Cost model is not run in the VPlan-native path - return conservative 1441 // result until this changes. 1442 if (EnableVPlanNativePath) 1443 return false; 1444 1445 auto ScalarsPerVF = Scalars.find(VF); 1446 assert(ScalarsPerVF != Scalars.end() && 1447 "Scalar values are not calculated for VF"); 1448 return ScalarsPerVF->second.count(I); 1449 } 1450 1451 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1452 /// for vectorization factor \p VF. 1453 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1454 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1455 !isProfitableToScalarize(I, VF) && 1456 !isScalarAfterVectorization(I, VF); 1457 } 1458 1459 /// Decision that was taken during cost calculation for memory instruction. 1460 enum InstWidening { 1461 CM_Unknown, 1462 CM_Widen, // For consecutive accesses with stride +1. 1463 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1464 CM_Interleave, 1465 CM_GatherScatter, 1466 CM_Scalarize 1467 }; 1468 1469 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1470 /// instruction \p I and vector width \p VF. 1471 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1472 InstructionCost Cost) { 1473 assert(VF.isVector() && "Expected VF >=2"); 1474 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1475 } 1476 1477 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1478 /// interleaving group \p Grp and vector width \p VF. 1479 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1480 ElementCount VF, InstWidening W, 1481 InstructionCost Cost) { 1482 assert(VF.isVector() && "Expected VF >=2"); 1483 /// Broadcast this decicion to all instructions inside the group. 1484 /// But the cost will be assigned to one instruction only. 1485 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1486 if (auto *I = Grp->getMember(i)) { 1487 if (Grp->getInsertPos() == I) 1488 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1489 else 1490 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1491 } 1492 } 1493 } 1494 1495 /// Return the cost model decision for the given instruction \p I and vector 1496 /// width \p VF. Return CM_Unknown if this instruction did not pass 1497 /// through the cost modeling. 1498 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1499 assert(VF.isVector() && "Expected VF to be a vector VF"); 1500 // Cost model is not run in the VPlan-native path - return conservative 1501 // result until this changes. 1502 if (EnableVPlanNativePath) 1503 return CM_GatherScatter; 1504 1505 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1506 auto Itr = WideningDecisions.find(InstOnVF); 1507 if (Itr == WideningDecisions.end()) 1508 return CM_Unknown; 1509 return Itr->second.first; 1510 } 1511 1512 /// Return the vectorization cost for the given instruction \p I and vector 1513 /// width \p VF. 1514 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1515 assert(VF.isVector() && "Expected VF >=2"); 1516 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1517 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1518 "The cost is not calculated"); 1519 return WideningDecisions[InstOnVF].second; 1520 } 1521 1522 /// Return True if instruction \p I is an optimizable truncate whose operand 1523 /// is an induction variable. Such a truncate will be removed by adding a new 1524 /// induction variable with the destination type. 1525 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1526 // If the instruction is not a truncate, return false. 1527 auto *Trunc = dyn_cast<TruncInst>(I); 1528 if (!Trunc) 1529 return false; 1530 1531 // Get the source and destination types of the truncate. 1532 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1533 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1534 1535 // If the truncate is free for the given types, return false. Replacing a 1536 // free truncate with an induction variable would add an induction variable 1537 // update instruction to each iteration of the loop. We exclude from this 1538 // check the primary induction variable since it will need an update 1539 // instruction regardless. 1540 Value *Op = Trunc->getOperand(0); 1541 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1542 return false; 1543 1544 // If the truncated value is not an induction variable, return false. 1545 return Legal->isInductionPhi(Op); 1546 } 1547 1548 /// Collects the instructions to scalarize for each predicated instruction in 1549 /// the loop. 1550 void collectInstsToScalarize(ElementCount VF); 1551 1552 /// Collect Uniform and Scalar values for the given \p VF. 1553 /// The sets depend on CM decision for Load/Store instructions 1554 /// that may be vectorized as interleave, gather-scatter or scalarized. 1555 void collectUniformsAndScalars(ElementCount VF) { 1556 // Do the analysis once. 1557 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1558 return; 1559 setCostBasedWideningDecision(VF); 1560 collectLoopUniforms(VF); 1561 collectLoopScalars(VF); 1562 } 1563 1564 /// Returns true if the target machine supports masked store operation 1565 /// for the given \p DataType and kind of access to \p Ptr. 1566 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1567 return Legal->isConsecutivePtr(DataType, Ptr) && 1568 TTI.isLegalMaskedStore(DataType, Alignment); 1569 } 1570 1571 /// Returns true if the target machine supports masked load operation 1572 /// for the given \p DataType and kind of access to \p Ptr. 1573 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1574 return Legal->isConsecutivePtr(DataType, Ptr) && 1575 TTI.isLegalMaskedLoad(DataType, Alignment); 1576 } 1577 1578 /// Returns true if the target machine can represent \p V as a masked gather 1579 /// or scatter operation. 1580 bool isLegalGatherOrScatter(Value *V) { 1581 bool LI = isa<LoadInst>(V); 1582 bool SI = isa<StoreInst>(V); 1583 if (!LI && !SI) 1584 return false; 1585 auto *Ty = getLoadStoreType(V); 1586 Align Align = getLoadStoreAlignment(V); 1587 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1588 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1589 } 1590 1591 /// Returns true if the target machine supports all of the reduction 1592 /// variables found for the given VF. 1593 bool canVectorizeReductions(ElementCount VF) const { 1594 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1595 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1596 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1597 })); 1598 } 1599 1600 /// Returns true if \p I is an instruction that will be scalarized with 1601 /// predication. Such instructions include conditional stores and 1602 /// instructions that may divide by zero. 1603 /// If a non-zero VF has been calculated, we check if I will be scalarized 1604 /// predication for that VF. 1605 bool isScalarWithPredication(Instruction *I) const; 1606 1607 // Returns true if \p I is an instruction that will be predicated either 1608 // through scalar predication or masked load/store or masked gather/scatter. 1609 // Superset of instructions that return true for isScalarWithPredication. 1610 bool isPredicatedInst(Instruction *I, bool IsKnownUniform = false) { 1611 // When we know the load is uniform and the original scalar loop was not 1612 // predicated we don't need to mark it as a predicated instruction. Any 1613 // vectorised blocks created when tail-folding are something artificial we 1614 // have introduced and we know there is always at least one active lane. 1615 // That's why we call Legal->blockNeedsPredication here because it doesn't 1616 // query tail-folding. 1617 if (IsKnownUniform && isa<LoadInst>(I) && 1618 !Legal->blockNeedsPredication(I->getParent())) 1619 return false; 1620 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1621 return false; 1622 // Loads and stores that need some form of masked operation are predicated 1623 // instructions. 1624 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1625 return Legal->isMaskRequired(I); 1626 return isScalarWithPredication(I); 1627 } 1628 1629 /// Returns true if \p I is a memory instruction with consecutive memory 1630 /// access that can be widened. 1631 bool 1632 memoryInstructionCanBeWidened(Instruction *I, 1633 ElementCount VF = ElementCount::getFixed(1)); 1634 1635 /// Returns true if \p I is a memory instruction in an interleaved-group 1636 /// of memory accesses that can be vectorized with wide vector loads/stores 1637 /// and shuffles. 1638 bool 1639 interleavedAccessCanBeWidened(Instruction *I, 1640 ElementCount VF = ElementCount::getFixed(1)); 1641 1642 /// Check if \p Instr belongs to any interleaved access group. 1643 bool isAccessInterleaved(Instruction *Instr) { 1644 return InterleaveInfo.isInterleaved(Instr); 1645 } 1646 1647 /// Get the interleaved access group that \p Instr belongs to. 1648 const InterleaveGroup<Instruction> * 1649 getInterleavedAccessGroup(Instruction *Instr) { 1650 return InterleaveInfo.getInterleaveGroup(Instr); 1651 } 1652 1653 /// Returns true if we're required to use a scalar epilogue for at least 1654 /// the final iteration of the original loop. 1655 bool requiresScalarEpilogue(ElementCount VF) const { 1656 if (!isScalarEpilogueAllowed()) 1657 return false; 1658 // If we might exit from anywhere but the latch, must run the exiting 1659 // iteration in scalar form. 1660 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1661 return true; 1662 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1663 } 1664 1665 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1666 /// loop hint annotation. 1667 bool isScalarEpilogueAllowed() const { 1668 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1669 } 1670 1671 /// Returns true if all loop blocks should be masked to fold tail loop. 1672 bool foldTailByMasking() const { return FoldTailByMasking; } 1673 1674 /// Returns true if the instructions in this block requires predication 1675 /// for any reason, e.g. because tail folding now requires a predicate 1676 /// or because the block in the original loop was predicated. 1677 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const { 1678 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1679 } 1680 1681 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1682 /// nodes to the chain of instructions representing the reductions. Uses a 1683 /// MapVector to ensure deterministic iteration order. 1684 using ReductionChainMap = 1685 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1686 1687 /// Return the chain of instructions representing an inloop reduction. 1688 const ReductionChainMap &getInLoopReductionChains() const { 1689 return InLoopReductionChains; 1690 } 1691 1692 /// Returns true if the Phi is part of an inloop reduction. 1693 bool isInLoopReduction(PHINode *Phi) const { 1694 return InLoopReductionChains.count(Phi); 1695 } 1696 1697 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1698 /// with factor VF. Return the cost of the instruction, including 1699 /// scalarization overhead if it's needed. 1700 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1701 1702 /// Estimate cost of a call instruction CI if it were vectorized with factor 1703 /// VF. Return the cost of the instruction, including scalarization overhead 1704 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1705 /// scalarized - 1706 /// i.e. either vector version isn't available, or is too expensive. 1707 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1708 bool &NeedToScalarize) const; 1709 1710 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1711 /// that of B. 1712 bool isMoreProfitable(const VectorizationFactor &A, 1713 const VectorizationFactor &B) const; 1714 1715 /// Invalidates decisions already taken by the cost model. 1716 void invalidateCostModelingDecisions() { 1717 WideningDecisions.clear(); 1718 Uniforms.clear(); 1719 Scalars.clear(); 1720 } 1721 1722 private: 1723 unsigned NumPredStores = 0; 1724 1725 /// \return An upper bound for the vectorization factors for both 1726 /// fixed and scalable vectorization, where the minimum-known number of 1727 /// elements is a power-of-2 larger than zero. If scalable vectorization is 1728 /// disabled or unsupported, then the scalable part will be equal to 1729 /// ElementCount::getScalable(0). 1730 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount, 1731 ElementCount UserVF); 1732 1733 /// \return the maximized element count based on the targets vector 1734 /// registers and the loop trip-count, but limited to a maximum safe VF. 1735 /// This is a helper function of computeFeasibleMaxVF. 1736 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure 1737 /// issue that occurred on one of the buildbots which cannot be reproduced 1738 /// without having access to the properietary compiler (see comments on 1739 /// D98509). The issue is currently under investigation and this workaround 1740 /// will be removed as soon as possible. 1741 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, 1742 unsigned SmallestType, 1743 unsigned WidestType, 1744 const ElementCount &MaxSafeVF); 1745 1746 /// \return the maximum legal scalable VF, based on the safe max number 1747 /// of elements. 1748 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements); 1749 1750 /// The vectorization cost is a combination of the cost itself and a boolean 1751 /// indicating whether any of the contributing operations will actually 1752 /// operate on vector values after type legalization in the backend. If this 1753 /// latter value is false, then all operations will be scalarized (i.e. no 1754 /// vectorization has actually taken place). 1755 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1756 1757 /// Returns the expected execution cost. The unit of the cost does 1758 /// not matter because we use the 'cost' units to compare different 1759 /// vector widths. The cost that is returned is *not* normalized by 1760 /// the factor width. If \p Invalid is not nullptr, this function 1761 /// will add a pair(Instruction*, ElementCount) to \p Invalid for 1762 /// each instruction that has an Invalid cost for the given VF. 1763 using InstructionVFPair = std::pair<Instruction *, ElementCount>; 1764 VectorizationCostTy 1765 expectedCost(ElementCount VF, 1766 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr); 1767 1768 /// Returns the execution time cost of an instruction for a given vector 1769 /// width. Vector width of one means scalar. 1770 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1771 1772 /// The cost-computation logic from getInstructionCost which provides 1773 /// the vector type as an output parameter. 1774 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1775 Type *&VectorTy); 1776 1777 /// Return the cost of instructions in an inloop reduction pattern, if I is 1778 /// part of that pattern. 1779 Optional<InstructionCost> 1780 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, 1781 TTI::TargetCostKind CostKind); 1782 1783 /// Calculate vectorization cost of memory instruction \p I. 1784 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1785 1786 /// The cost computation for scalarized memory instruction. 1787 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1788 1789 /// The cost computation for interleaving group of memory instructions. 1790 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1791 1792 /// The cost computation for Gather/Scatter instruction. 1793 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1794 1795 /// The cost computation for widening instruction \p I with consecutive 1796 /// memory access. 1797 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1798 1799 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1800 /// Load: scalar load + broadcast. 1801 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1802 /// element) 1803 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1804 1805 /// Estimate the overhead of scalarizing an instruction. This is a 1806 /// convenience wrapper for the type-based getScalarizationOverhead API. 1807 InstructionCost getScalarizationOverhead(Instruction *I, 1808 ElementCount VF) const; 1809 1810 /// Returns whether the instruction is a load or store and will be a emitted 1811 /// as a vector operation. 1812 bool isConsecutiveLoadOrStore(Instruction *I); 1813 1814 /// Returns true if an artificially high cost for emulated masked memrefs 1815 /// should be used. 1816 bool useEmulatedMaskMemRefHack(Instruction *I); 1817 1818 /// Map of scalar integer values to the smallest bitwidth they can be legally 1819 /// represented as. The vector equivalents of these values should be truncated 1820 /// to this type. 1821 MapVector<Instruction *, uint64_t> MinBWs; 1822 1823 /// A type representing the costs for instructions if they were to be 1824 /// scalarized rather than vectorized. The entries are Instruction-Cost 1825 /// pairs. 1826 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; 1827 1828 /// A set containing all BasicBlocks that are known to present after 1829 /// vectorization as a predicated block. 1830 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1831 1832 /// Records whether it is allowed to have the original scalar loop execute at 1833 /// least once. This may be needed as a fallback loop in case runtime 1834 /// aliasing/dependence checks fail, or to handle the tail/remainder 1835 /// iterations when the trip count is unknown or doesn't divide by the VF, 1836 /// or as a peel-loop to handle gaps in interleave-groups. 1837 /// Under optsize and when the trip count is very small we don't allow any 1838 /// iterations to execute in the scalar loop. 1839 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1840 1841 /// All blocks of loop are to be masked to fold tail of scalar iterations. 1842 bool FoldTailByMasking = false; 1843 1844 /// A map holding scalar costs for different vectorization factors. The 1845 /// presence of a cost for an instruction in the mapping indicates that the 1846 /// instruction will be scalarized when vectorizing with the associated 1847 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1848 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1849 1850 /// Holds the instructions known to be uniform after vectorization. 1851 /// The data is collected per VF. 1852 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1853 1854 /// Holds the instructions known to be scalar after vectorization. 1855 /// The data is collected per VF. 1856 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1857 1858 /// Holds the instructions (address computations) that are forced to be 1859 /// scalarized. 1860 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1861 1862 /// PHINodes of the reductions that should be expanded in-loop along with 1863 /// their associated chains of reduction operations, in program order from top 1864 /// (PHI) to bottom 1865 ReductionChainMap InLoopReductionChains; 1866 1867 /// A Map of inloop reduction operations and their immediate chain operand. 1868 /// FIXME: This can be removed once reductions can be costed correctly in 1869 /// vplan. This was added to allow quick lookup to the inloop operations, 1870 /// without having to loop through InLoopReductionChains. 1871 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1872 1873 /// Returns the expected difference in cost from scalarizing the expression 1874 /// feeding a predicated instruction \p PredInst. The instructions to 1875 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1876 /// non-negative return value implies the expression will be scalarized. 1877 /// Currently, only single-use chains are considered for scalarization. 1878 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1879 ElementCount VF); 1880 1881 /// Collect the instructions that are uniform after vectorization. An 1882 /// instruction is uniform if we represent it with a single scalar value in 1883 /// the vectorized loop corresponding to each vector iteration. Examples of 1884 /// uniform instructions include pointer operands of consecutive or 1885 /// interleaved memory accesses. Note that although uniformity implies an 1886 /// instruction will be scalar, the reverse is not true. In general, a 1887 /// scalarized instruction will be represented by VF scalar values in the 1888 /// vectorized loop, each corresponding to an iteration of the original 1889 /// scalar loop. 1890 void collectLoopUniforms(ElementCount VF); 1891 1892 /// Collect the instructions that are scalar after vectorization. An 1893 /// instruction is scalar if it is known to be uniform or will be scalarized 1894 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1895 /// to the list if they are used by a load/store instruction that is marked as 1896 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1897 /// VF values in the vectorized loop, each corresponding to an iteration of 1898 /// the original scalar loop. 1899 void collectLoopScalars(ElementCount VF); 1900 1901 /// Keeps cost model vectorization decision and cost for instructions. 1902 /// Right now it is used for memory instructions only. 1903 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1904 std::pair<InstWidening, InstructionCost>>; 1905 1906 DecisionList WideningDecisions; 1907 1908 /// Returns true if \p V is expected to be vectorized and it needs to be 1909 /// extracted. 1910 bool needsExtract(Value *V, ElementCount VF) const { 1911 Instruction *I = dyn_cast<Instruction>(V); 1912 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1913 TheLoop->isLoopInvariant(I)) 1914 return false; 1915 1916 // Assume we can vectorize V (and hence we need extraction) if the 1917 // scalars are not computed yet. This can happen, because it is called 1918 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1919 // the scalars are collected. That should be a safe assumption in most 1920 // cases, because we check if the operands have vectorizable types 1921 // beforehand in LoopVectorizationLegality. 1922 return Scalars.find(VF) == Scalars.end() || 1923 !isScalarAfterVectorization(I, VF); 1924 }; 1925 1926 /// Returns a range containing only operands needing to be extracted. 1927 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1928 ElementCount VF) const { 1929 return SmallVector<Value *, 4>(make_filter_range( 1930 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1931 } 1932 1933 /// Determines if we have the infrastructure to vectorize loop \p L and its 1934 /// epilogue, assuming the main loop is vectorized by \p VF. 1935 bool isCandidateForEpilogueVectorization(const Loop &L, 1936 const ElementCount VF) const; 1937 1938 /// Returns true if epilogue vectorization is considered profitable, and 1939 /// false otherwise. 1940 /// \p VF is the vectorization factor chosen for the original loop. 1941 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1942 1943 public: 1944 /// The loop that we evaluate. 1945 Loop *TheLoop; 1946 1947 /// Predicated scalar evolution analysis. 1948 PredicatedScalarEvolution &PSE; 1949 1950 /// Loop Info analysis. 1951 LoopInfo *LI; 1952 1953 /// Vectorization legality. 1954 LoopVectorizationLegality *Legal; 1955 1956 /// Vector target information. 1957 const TargetTransformInfo &TTI; 1958 1959 /// Target Library Info. 1960 const TargetLibraryInfo *TLI; 1961 1962 /// Demanded bits analysis. 1963 DemandedBits *DB; 1964 1965 /// Assumption cache. 1966 AssumptionCache *AC; 1967 1968 /// Interface to emit optimization remarks. 1969 OptimizationRemarkEmitter *ORE; 1970 1971 const Function *TheFunction; 1972 1973 /// Loop Vectorize Hint. 1974 const LoopVectorizeHints *Hints; 1975 1976 /// The interleave access information contains groups of interleaved accesses 1977 /// with the same stride and close to each other. 1978 InterleavedAccessInfo &InterleaveInfo; 1979 1980 /// Values to ignore in the cost model. 1981 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1982 1983 /// Values to ignore in the cost model when VF > 1. 1984 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1985 1986 /// All element types found in the loop. 1987 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1988 1989 /// Profitable vector factors. 1990 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1991 }; 1992 } // end namespace llvm 1993 1994 /// Helper struct to manage generating runtime checks for vectorization. 1995 /// 1996 /// The runtime checks are created up-front in temporary blocks to allow better 1997 /// estimating the cost and un-linked from the existing IR. After deciding to 1998 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1999 /// temporary blocks are completely removed. 2000 class GeneratedRTChecks { 2001 /// Basic block which contains the generated SCEV checks, if any. 2002 BasicBlock *SCEVCheckBlock = nullptr; 2003 2004 /// The value representing the result of the generated SCEV checks. If it is 2005 /// nullptr, either no SCEV checks have been generated or they have been used. 2006 Value *SCEVCheckCond = nullptr; 2007 2008 /// Basic block which contains the generated memory runtime checks, if any. 2009 BasicBlock *MemCheckBlock = nullptr; 2010 2011 /// The value representing the result of the generated memory runtime checks. 2012 /// If it is nullptr, either no memory runtime checks have been generated or 2013 /// they have been used. 2014 Value *MemRuntimeCheckCond = nullptr; 2015 2016 DominatorTree *DT; 2017 LoopInfo *LI; 2018 2019 SCEVExpander SCEVExp; 2020 SCEVExpander MemCheckExp; 2021 2022 public: 2023 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 2024 const DataLayout &DL) 2025 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 2026 MemCheckExp(SE, DL, "scev.check") {} 2027 2028 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 2029 /// accurately estimate the cost of the runtime checks. The blocks are 2030 /// un-linked from the IR and is added back during vector code generation. If 2031 /// there is no vector code generation, the check blocks are removed 2032 /// completely. 2033 void Create(Loop *L, const LoopAccessInfo &LAI, 2034 const SCEVUnionPredicate &UnionPred) { 2035 2036 BasicBlock *LoopHeader = L->getHeader(); 2037 BasicBlock *Preheader = L->getLoopPreheader(); 2038 2039 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 2040 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 2041 // may be used by SCEVExpander. The blocks will be un-linked from their 2042 // predecessors and removed from LI & DT at the end of the function. 2043 if (!UnionPred.isAlwaysTrue()) { 2044 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 2045 nullptr, "vector.scevcheck"); 2046 2047 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 2048 &UnionPred, SCEVCheckBlock->getTerminator()); 2049 } 2050 2051 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 2052 if (RtPtrChecking.Need) { 2053 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 2054 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 2055 "vector.memcheck"); 2056 2057 MemRuntimeCheckCond = 2058 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 2059 RtPtrChecking.getChecks(), MemCheckExp); 2060 assert(MemRuntimeCheckCond && 2061 "no RT checks generated although RtPtrChecking " 2062 "claimed checks are required"); 2063 } 2064 2065 if (!MemCheckBlock && !SCEVCheckBlock) 2066 return; 2067 2068 // Unhook the temporary block with the checks, update various places 2069 // accordingly. 2070 if (SCEVCheckBlock) 2071 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2072 if (MemCheckBlock) 2073 MemCheckBlock->replaceAllUsesWith(Preheader); 2074 2075 if (SCEVCheckBlock) { 2076 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2077 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2078 Preheader->getTerminator()->eraseFromParent(); 2079 } 2080 if (MemCheckBlock) { 2081 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2082 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2083 Preheader->getTerminator()->eraseFromParent(); 2084 } 2085 2086 DT->changeImmediateDominator(LoopHeader, Preheader); 2087 if (MemCheckBlock) { 2088 DT->eraseNode(MemCheckBlock); 2089 LI->removeBlock(MemCheckBlock); 2090 } 2091 if (SCEVCheckBlock) { 2092 DT->eraseNode(SCEVCheckBlock); 2093 LI->removeBlock(SCEVCheckBlock); 2094 } 2095 } 2096 2097 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2098 /// unused. 2099 ~GeneratedRTChecks() { 2100 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 2101 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 2102 if (!SCEVCheckCond) 2103 SCEVCleaner.markResultUsed(); 2104 2105 if (!MemRuntimeCheckCond) 2106 MemCheckCleaner.markResultUsed(); 2107 2108 if (MemRuntimeCheckCond) { 2109 auto &SE = *MemCheckExp.getSE(); 2110 // Memory runtime check generation creates compares that use expanded 2111 // values. Remove them before running the SCEVExpanderCleaners. 2112 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2113 if (MemCheckExp.isInsertedInstruction(&I)) 2114 continue; 2115 SE.forgetValue(&I); 2116 I.eraseFromParent(); 2117 } 2118 } 2119 MemCheckCleaner.cleanup(); 2120 SCEVCleaner.cleanup(); 2121 2122 if (SCEVCheckCond) 2123 SCEVCheckBlock->eraseFromParent(); 2124 if (MemRuntimeCheckCond) 2125 MemCheckBlock->eraseFromParent(); 2126 } 2127 2128 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2129 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2130 /// depending on the generated condition. 2131 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2132 BasicBlock *LoopVectorPreHeader, 2133 BasicBlock *LoopExitBlock) { 2134 if (!SCEVCheckCond) 2135 return nullptr; 2136 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2137 if (C->isZero()) 2138 return nullptr; 2139 2140 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2141 2142 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2143 // Create new preheader for vector loop. 2144 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2145 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2146 2147 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2148 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2149 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2150 SCEVCheckBlock); 2151 2152 DT->addNewBlock(SCEVCheckBlock, Pred); 2153 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2154 2155 ReplaceInstWithInst( 2156 SCEVCheckBlock->getTerminator(), 2157 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2158 // Mark the check as used, to prevent it from being removed during cleanup. 2159 SCEVCheckCond = nullptr; 2160 return SCEVCheckBlock; 2161 } 2162 2163 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2164 /// the branches to branch to the vector preheader or \p Bypass, depending on 2165 /// the generated condition. 2166 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2167 BasicBlock *LoopVectorPreHeader) { 2168 // Check if we generated code that checks in runtime if arrays overlap. 2169 if (!MemRuntimeCheckCond) 2170 return nullptr; 2171 2172 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2173 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2174 MemCheckBlock); 2175 2176 DT->addNewBlock(MemCheckBlock, Pred); 2177 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2178 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2179 2180 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2181 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2182 2183 ReplaceInstWithInst( 2184 MemCheckBlock->getTerminator(), 2185 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2186 MemCheckBlock->getTerminator()->setDebugLoc( 2187 Pred->getTerminator()->getDebugLoc()); 2188 2189 // Mark the check as used, to prevent it from being removed during cleanup. 2190 MemRuntimeCheckCond = nullptr; 2191 return MemCheckBlock; 2192 } 2193 }; 2194 2195 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2196 // vectorization. The loop needs to be annotated with #pragma omp simd 2197 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2198 // vector length information is not provided, vectorization is not considered 2199 // explicit. Interleave hints are not allowed either. These limitations will be 2200 // relaxed in the future. 2201 // Please, note that we are currently forced to abuse the pragma 'clang 2202 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2203 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2204 // provides *explicit vectorization hints* (LV can bypass legal checks and 2205 // assume that vectorization is legal). However, both hints are implemented 2206 // using the same metadata (llvm.loop.vectorize, processed by 2207 // LoopVectorizeHints). This will be fixed in the future when the native IR 2208 // representation for pragma 'omp simd' is introduced. 2209 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2210 OptimizationRemarkEmitter *ORE) { 2211 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2212 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2213 2214 // Only outer loops with an explicit vectorization hint are supported. 2215 // Unannotated outer loops are ignored. 2216 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2217 return false; 2218 2219 Function *Fn = OuterLp->getHeader()->getParent(); 2220 if (!Hints.allowVectorization(Fn, OuterLp, 2221 true /*VectorizeOnlyWhenForced*/)) { 2222 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2223 return false; 2224 } 2225 2226 if (Hints.getInterleave() > 1) { 2227 // TODO: Interleave support is future work. 2228 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2229 "outer loops.\n"); 2230 Hints.emitRemarkWithHints(); 2231 return false; 2232 } 2233 2234 return true; 2235 } 2236 2237 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2238 OptimizationRemarkEmitter *ORE, 2239 SmallVectorImpl<Loop *> &V) { 2240 // Collect inner loops and outer loops without irreducible control flow. For 2241 // now, only collect outer loops that have explicit vectorization hints. If we 2242 // are stress testing the VPlan H-CFG construction, we collect the outermost 2243 // loop of every loop nest. 2244 if (L.isInnermost() || VPlanBuildStressTest || 2245 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2246 LoopBlocksRPO RPOT(&L); 2247 RPOT.perform(LI); 2248 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2249 V.push_back(&L); 2250 // TODO: Collect inner loops inside marked outer loops in case 2251 // vectorization fails for the outer loop. Do not invoke 2252 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2253 // already known to be reducible. We can use an inherited attribute for 2254 // that. 2255 return; 2256 } 2257 } 2258 for (Loop *InnerL : L) 2259 collectSupportedLoops(*InnerL, LI, ORE, V); 2260 } 2261 2262 namespace { 2263 2264 /// The LoopVectorize Pass. 2265 struct LoopVectorize : public FunctionPass { 2266 /// Pass identification, replacement for typeid 2267 static char ID; 2268 2269 LoopVectorizePass Impl; 2270 2271 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2272 bool VectorizeOnlyWhenForced = false) 2273 : FunctionPass(ID), 2274 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2275 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2276 } 2277 2278 bool runOnFunction(Function &F) override { 2279 if (skipFunction(F)) 2280 return false; 2281 2282 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2283 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2284 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2285 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2286 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2287 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2288 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2289 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2290 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2291 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2292 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2293 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2294 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2295 2296 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2297 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2298 2299 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2300 GetLAA, *ORE, PSI).MadeAnyChange; 2301 } 2302 2303 void getAnalysisUsage(AnalysisUsage &AU) const override { 2304 AU.addRequired<AssumptionCacheTracker>(); 2305 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2306 AU.addRequired<DominatorTreeWrapperPass>(); 2307 AU.addRequired<LoopInfoWrapperPass>(); 2308 AU.addRequired<ScalarEvolutionWrapperPass>(); 2309 AU.addRequired<TargetTransformInfoWrapperPass>(); 2310 AU.addRequired<AAResultsWrapperPass>(); 2311 AU.addRequired<LoopAccessLegacyAnalysis>(); 2312 AU.addRequired<DemandedBitsWrapperPass>(); 2313 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2314 AU.addRequired<InjectTLIMappingsLegacy>(); 2315 2316 // We currently do not preserve loopinfo/dominator analyses with outer loop 2317 // vectorization. Until this is addressed, mark these analyses as preserved 2318 // only for non-VPlan-native path. 2319 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2320 if (!EnableVPlanNativePath) { 2321 AU.addPreserved<LoopInfoWrapperPass>(); 2322 AU.addPreserved<DominatorTreeWrapperPass>(); 2323 } 2324 2325 AU.addPreserved<BasicAAWrapperPass>(); 2326 AU.addPreserved<GlobalsAAWrapperPass>(); 2327 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2328 } 2329 }; 2330 2331 } // end anonymous namespace 2332 2333 //===----------------------------------------------------------------------===// 2334 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2335 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2336 //===----------------------------------------------------------------------===// 2337 2338 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2339 // We need to place the broadcast of invariant variables outside the loop, 2340 // but only if it's proven safe to do so. Else, broadcast will be inside 2341 // vector loop body. 2342 Instruction *Instr = dyn_cast<Instruction>(V); 2343 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2344 (!Instr || 2345 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2346 // Place the code for broadcasting invariant variables in the new preheader. 2347 IRBuilder<>::InsertPointGuard Guard(Builder); 2348 if (SafeToHoist) 2349 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2350 2351 // Broadcast the scalar into all locations in the vector. 2352 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2353 2354 return Shuf; 2355 } 2356 2357 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2358 const InductionDescriptor &II, Value *Step, Value *Start, 2359 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2360 VPTransformState &State) { 2361 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2362 "Expected either an induction phi-node or a truncate of it!"); 2363 2364 // Construct the initial value of the vector IV in the vector loop preheader 2365 auto CurrIP = Builder.saveIP(); 2366 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2367 if (isa<TruncInst>(EntryVal)) { 2368 assert(Start->getType()->isIntegerTy() && 2369 "Truncation requires an integer type"); 2370 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2371 Step = Builder.CreateTrunc(Step, TruncType); 2372 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2373 } 2374 2375 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2376 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2377 Value *SteppedStart = 2378 getStepVector(SplatStart, Zero, Step, II.getInductionOpcode()); 2379 2380 // We create vector phi nodes for both integer and floating-point induction 2381 // variables. Here, we determine the kind of arithmetic we will perform. 2382 Instruction::BinaryOps AddOp; 2383 Instruction::BinaryOps MulOp; 2384 if (Step->getType()->isIntegerTy()) { 2385 AddOp = Instruction::Add; 2386 MulOp = Instruction::Mul; 2387 } else { 2388 AddOp = II.getInductionOpcode(); 2389 MulOp = Instruction::FMul; 2390 } 2391 2392 // Multiply the vectorization factor by the step using integer or 2393 // floating-point arithmetic as appropriate. 2394 Type *StepType = Step->getType(); 2395 Value *RuntimeVF; 2396 if (Step->getType()->isFloatingPointTy()) 2397 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, VF); 2398 else 2399 RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2400 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2401 2402 // Create a vector splat to use in the induction update. 2403 // 2404 // FIXME: If the step is non-constant, we create the vector splat with 2405 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2406 // handle a constant vector splat. 2407 Value *SplatVF = isa<Constant>(Mul) 2408 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2409 : Builder.CreateVectorSplat(VF, Mul); 2410 Builder.restoreIP(CurrIP); 2411 2412 // We may need to add the step a number of times, depending on the unroll 2413 // factor. The last of those goes into the PHI. 2414 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2415 &*LoopVectorBody->getFirstInsertionPt()); 2416 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2417 Instruction *LastInduction = VecInd; 2418 for (unsigned Part = 0; Part < UF; ++Part) { 2419 State.set(Def, LastInduction, Part); 2420 2421 if (isa<TruncInst>(EntryVal)) 2422 addMetadata(LastInduction, EntryVal); 2423 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2424 State, Part); 2425 2426 LastInduction = cast<Instruction>( 2427 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2428 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2429 } 2430 2431 // Move the last step to the end of the latch block. This ensures consistent 2432 // placement of all induction updates. 2433 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2434 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2435 auto *ICmp = cast<Instruction>(Br->getCondition()); 2436 LastInduction->moveBefore(ICmp); 2437 LastInduction->setName("vec.ind.next"); 2438 2439 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2440 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2441 } 2442 2443 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2444 return Cost->isScalarAfterVectorization(I, VF) || 2445 Cost->isProfitableToScalarize(I, VF); 2446 } 2447 2448 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2449 if (shouldScalarizeInstruction(IV)) 2450 return true; 2451 auto isScalarInst = [&](User *U) -> bool { 2452 auto *I = cast<Instruction>(U); 2453 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2454 }; 2455 return llvm::any_of(IV->users(), isScalarInst); 2456 } 2457 2458 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2459 const InductionDescriptor &ID, const Instruction *EntryVal, 2460 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2461 unsigned Part, unsigned Lane) { 2462 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2463 "Expected either an induction phi-node or a truncate of it!"); 2464 2465 // This induction variable is not the phi from the original loop but the 2466 // newly-created IV based on the proof that casted Phi is equal to the 2467 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2468 // re-uses the same InductionDescriptor that original IV uses but we don't 2469 // have to do any recording in this case - that is done when original IV is 2470 // processed. 2471 if (isa<TruncInst>(EntryVal)) 2472 return; 2473 2474 if (!CastDef) { 2475 assert(ID.getCastInsts().empty() && 2476 "there are casts for ID, but no CastDef"); 2477 return; 2478 } 2479 assert(!ID.getCastInsts().empty() && 2480 "there is a CastDef, but no casts for ID"); 2481 // Only the first Cast instruction in the Casts vector is of interest. 2482 // The rest of the Casts (if exist) have no uses outside the 2483 // induction update chain itself. 2484 if (Lane < UINT_MAX) 2485 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2486 else 2487 State.set(CastDef, VectorLoopVal, Part); 2488 } 2489 2490 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2491 TruncInst *Trunc, VPValue *Def, 2492 VPValue *CastDef, 2493 VPTransformState &State) { 2494 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2495 "Primary induction variable must have an integer type"); 2496 2497 auto II = Legal->getInductionVars().find(IV); 2498 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2499 2500 auto ID = II->second; 2501 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2502 2503 // The value from the original loop to which we are mapping the new induction 2504 // variable. 2505 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2506 2507 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2508 2509 // Generate code for the induction step. Note that induction steps are 2510 // required to be loop-invariant 2511 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2512 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2513 "Induction step should be loop invariant"); 2514 if (PSE.getSE()->isSCEVable(IV->getType())) { 2515 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2516 return Exp.expandCodeFor(Step, Step->getType(), 2517 LoopVectorPreHeader->getTerminator()); 2518 } 2519 return cast<SCEVUnknown>(Step)->getValue(); 2520 }; 2521 2522 // The scalar value to broadcast. This is derived from the canonical 2523 // induction variable. If a truncation type is given, truncate the canonical 2524 // induction variable and step. Otherwise, derive these values from the 2525 // induction descriptor. 2526 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2527 Value *ScalarIV = Induction; 2528 if (IV != OldInduction) { 2529 ScalarIV = IV->getType()->isIntegerTy() 2530 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2531 : Builder.CreateCast(Instruction::SIToFP, Induction, 2532 IV->getType()); 2533 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2534 ScalarIV->setName("offset.idx"); 2535 } 2536 if (Trunc) { 2537 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2538 assert(Step->getType()->isIntegerTy() && 2539 "Truncation requires an integer step"); 2540 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2541 Step = Builder.CreateTrunc(Step, TruncType); 2542 } 2543 return ScalarIV; 2544 }; 2545 2546 // Create the vector values from the scalar IV, in the absence of creating a 2547 // vector IV. 2548 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2549 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2550 for (unsigned Part = 0; Part < UF; ++Part) { 2551 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2552 Value *StartIdx; 2553 if (Step->getType()->isFloatingPointTy()) 2554 StartIdx = getRuntimeVFAsFloat(Builder, Step->getType(), VF * Part); 2555 else 2556 StartIdx = getRuntimeVF(Builder, Step->getType(), VF * Part); 2557 2558 Value *EntryPart = 2559 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode()); 2560 State.set(Def, EntryPart, Part); 2561 if (Trunc) 2562 addMetadata(EntryPart, Trunc); 2563 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2564 State, Part); 2565 } 2566 }; 2567 2568 // Fast-math-flags propagate from the original induction instruction. 2569 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2570 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2571 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2572 2573 // Now do the actual transformations, and start with creating the step value. 2574 Value *Step = CreateStepValue(ID.getStep()); 2575 if (VF.isZero() || VF.isScalar()) { 2576 Value *ScalarIV = CreateScalarIV(Step); 2577 CreateSplatIV(ScalarIV, Step); 2578 return; 2579 } 2580 2581 // Determine if we want a scalar version of the induction variable. This is 2582 // true if the induction variable itself is not widened, or if it has at 2583 // least one user in the loop that is not widened. 2584 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2585 if (!NeedsScalarIV) { 2586 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2587 State); 2588 return; 2589 } 2590 2591 // Try to create a new independent vector induction variable. If we can't 2592 // create the phi node, we will splat the scalar induction variable in each 2593 // loop iteration. 2594 if (!shouldScalarizeInstruction(EntryVal)) { 2595 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2596 State); 2597 Value *ScalarIV = CreateScalarIV(Step); 2598 // Create scalar steps that can be used by instructions we will later 2599 // scalarize. Note that the addition of the scalar steps will not increase 2600 // the number of instructions in the loop in the common case prior to 2601 // InstCombine. We will be trading one vector extract for each scalar step. 2602 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2603 return; 2604 } 2605 2606 // All IV users are scalar instructions, so only emit a scalar IV, not a 2607 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2608 // predicate used by the masked loads/stores. 2609 Value *ScalarIV = CreateScalarIV(Step); 2610 if (!Cost->isScalarEpilogueAllowed()) 2611 CreateSplatIV(ScalarIV, Step); 2612 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2613 } 2614 2615 Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx, 2616 Value *Step, 2617 Instruction::BinaryOps BinOp) { 2618 // Create and check the types. 2619 auto *ValVTy = cast<VectorType>(Val->getType()); 2620 ElementCount VLen = ValVTy->getElementCount(); 2621 2622 Type *STy = Val->getType()->getScalarType(); 2623 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2624 "Induction Step must be an integer or FP"); 2625 assert(Step->getType() == STy && "Step has wrong type"); 2626 2627 SmallVector<Constant *, 8> Indices; 2628 2629 // Create a vector of consecutive numbers from zero to VF. 2630 VectorType *InitVecValVTy = ValVTy; 2631 Type *InitVecValSTy = STy; 2632 if (STy->isFloatingPointTy()) { 2633 InitVecValSTy = 2634 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2635 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2636 } 2637 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2638 2639 // Splat the StartIdx 2640 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2641 2642 if (STy->isIntegerTy()) { 2643 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2644 Step = Builder.CreateVectorSplat(VLen, Step); 2645 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2646 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2647 // which can be found from the original scalar operations. 2648 Step = Builder.CreateMul(InitVec, Step); 2649 return Builder.CreateAdd(Val, Step, "induction"); 2650 } 2651 2652 // Floating point induction. 2653 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2654 "Binary Opcode should be specified for FP induction"); 2655 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2656 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2657 2658 Step = Builder.CreateVectorSplat(VLen, Step); 2659 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2660 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2661 } 2662 2663 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2664 Instruction *EntryVal, 2665 const InductionDescriptor &ID, 2666 VPValue *Def, VPValue *CastDef, 2667 VPTransformState &State) { 2668 // We shouldn't have to build scalar steps if we aren't vectorizing. 2669 assert(VF.isVector() && "VF should be greater than one"); 2670 // Get the value type and ensure it and the step have the same integer type. 2671 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2672 assert(ScalarIVTy == Step->getType() && 2673 "Val and Step should have the same type"); 2674 2675 // We build scalar steps for both integer and floating-point induction 2676 // variables. Here, we determine the kind of arithmetic we will perform. 2677 Instruction::BinaryOps AddOp; 2678 Instruction::BinaryOps MulOp; 2679 if (ScalarIVTy->isIntegerTy()) { 2680 AddOp = Instruction::Add; 2681 MulOp = Instruction::Mul; 2682 } else { 2683 AddOp = ID.getInductionOpcode(); 2684 MulOp = Instruction::FMul; 2685 } 2686 2687 // Determine the number of scalars we need to generate for each unroll 2688 // iteration. If EntryVal is uniform, we only need to generate the first 2689 // lane. Otherwise, we generate all VF values. 2690 bool IsUniform = 2691 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2692 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2693 // Compute the scalar steps and save the results in State. 2694 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2695 ScalarIVTy->getScalarSizeInBits()); 2696 Type *VecIVTy = nullptr; 2697 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2698 if (!IsUniform && VF.isScalable()) { 2699 VecIVTy = VectorType::get(ScalarIVTy, VF); 2700 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2701 SplatStep = Builder.CreateVectorSplat(VF, Step); 2702 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2703 } 2704 2705 for (unsigned Part = 0; Part < UF; ++Part) { 2706 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, VF, Part); 2707 2708 if (!IsUniform && VF.isScalable()) { 2709 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2710 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2711 if (ScalarIVTy->isFloatingPointTy()) 2712 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2713 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2714 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2715 State.set(Def, Add, Part); 2716 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2717 Part); 2718 // It's useful to record the lane values too for the known minimum number 2719 // of elements so we do those below. This improves the code quality when 2720 // trying to extract the first element, for example. 2721 } 2722 2723 if (ScalarIVTy->isFloatingPointTy()) 2724 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2725 2726 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2727 Value *StartIdx = Builder.CreateBinOp( 2728 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2729 // The step returned by `createStepForVF` is a runtime-evaluated value 2730 // when VF is scalable. Otherwise, it should be folded into a Constant. 2731 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2732 "Expected StartIdx to be folded to a constant when VF is not " 2733 "scalable"); 2734 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2735 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2736 State.set(Def, Add, VPIteration(Part, Lane)); 2737 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2738 Part, Lane); 2739 } 2740 } 2741 } 2742 2743 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2744 const VPIteration &Instance, 2745 VPTransformState &State) { 2746 Value *ScalarInst = State.get(Def, Instance); 2747 Value *VectorValue = State.get(Def, Instance.Part); 2748 VectorValue = Builder.CreateInsertElement( 2749 VectorValue, ScalarInst, 2750 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2751 State.set(Def, VectorValue, Instance.Part); 2752 } 2753 2754 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2755 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2756 return Builder.CreateVectorReverse(Vec, "reverse"); 2757 } 2758 2759 // Return whether we allow using masked interleave-groups (for dealing with 2760 // strided loads/stores that reside in predicated blocks, or for dealing 2761 // with gaps). 2762 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2763 // If an override option has been passed in for interleaved accesses, use it. 2764 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2765 return EnableMaskedInterleavedMemAccesses; 2766 2767 return TTI.enableMaskedInterleavedAccessVectorization(); 2768 } 2769 2770 // Try to vectorize the interleave group that \p Instr belongs to. 2771 // 2772 // E.g. Translate following interleaved load group (factor = 3): 2773 // for (i = 0; i < N; i+=3) { 2774 // R = Pic[i]; // Member of index 0 2775 // G = Pic[i+1]; // Member of index 1 2776 // B = Pic[i+2]; // Member of index 2 2777 // ... // do something to R, G, B 2778 // } 2779 // To: 2780 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2781 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2782 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2783 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2784 // 2785 // Or translate following interleaved store group (factor = 3): 2786 // for (i = 0; i < N; i+=3) { 2787 // ... do something to R, G, B 2788 // Pic[i] = R; // Member of index 0 2789 // Pic[i+1] = G; // Member of index 1 2790 // Pic[i+2] = B; // Member of index 2 2791 // } 2792 // To: 2793 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2794 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2795 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2796 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2797 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2798 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2799 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2800 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2801 VPValue *BlockInMask) { 2802 Instruction *Instr = Group->getInsertPos(); 2803 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2804 2805 // Prepare for the vector type of the interleaved load/store. 2806 Type *ScalarTy = getLoadStoreType(Instr); 2807 unsigned InterleaveFactor = Group->getFactor(); 2808 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2809 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2810 2811 // Prepare for the new pointers. 2812 SmallVector<Value *, 2> AddrParts; 2813 unsigned Index = Group->getIndex(Instr); 2814 2815 // TODO: extend the masked interleaved-group support to reversed access. 2816 assert((!BlockInMask || !Group->isReverse()) && 2817 "Reversed masked interleave-group not supported."); 2818 2819 // If the group is reverse, adjust the index to refer to the last vector lane 2820 // instead of the first. We adjust the index from the first vector lane, 2821 // rather than directly getting the pointer for lane VF - 1, because the 2822 // pointer operand of the interleaved access is supposed to be uniform. For 2823 // uniform instructions, we're only required to generate a value for the 2824 // first vector lane in each unroll iteration. 2825 if (Group->isReverse()) 2826 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2827 2828 for (unsigned Part = 0; Part < UF; Part++) { 2829 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2830 setDebugLocFromInst(AddrPart); 2831 2832 // Notice current instruction could be any index. Need to adjust the address 2833 // to the member of index 0. 2834 // 2835 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2836 // b = A[i]; // Member of index 0 2837 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2838 // 2839 // E.g. A[i+1] = a; // Member of index 1 2840 // A[i] = b; // Member of index 0 2841 // A[i+2] = c; // Member of index 2 (Current instruction) 2842 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2843 2844 bool InBounds = false; 2845 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2846 InBounds = gep->isInBounds(); 2847 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2848 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2849 2850 // Cast to the vector pointer type. 2851 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2852 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2853 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2854 } 2855 2856 setDebugLocFromInst(Instr); 2857 Value *PoisonVec = PoisonValue::get(VecTy); 2858 2859 Value *MaskForGaps = nullptr; 2860 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2861 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2862 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2863 } 2864 2865 // Vectorize the interleaved load group. 2866 if (isa<LoadInst>(Instr)) { 2867 // For each unroll part, create a wide load for the group. 2868 SmallVector<Value *, 2> NewLoads; 2869 for (unsigned Part = 0; Part < UF; Part++) { 2870 Instruction *NewLoad; 2871 if (BlockInMask || MaskForGaps) { 2872 assert(useMaskedInterleavedAccesses(*TTI) && 2873 "masked interleaved groups are not allowed."); 2874 Value *GroupMask = MaskForGaps; 2875 if (BlockInMask) { 2876 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2877 Value *ShuffledMask = Builder.CreateShuffleVector( 2878 BlockInMaskPart, 2879 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2880 "interleaved.mask"); 2881 GroupMask = MaskForGaps 2882 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2883 MaskForGaps) 2884 : ShuffledMask; 2885 } 2886 NewLoad = 2887 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2888 GroupMask, PoisonVec, "wide.masked.vec"); 2889 } 2890 else 2891 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2892 Group->getAlign(), "wide.vec"); 2893 Group->addMetadata(NewLoad); 2894 NewLoads.push_back(NewLoad); 2895 } 2896 2897 // For each member in the group, shuffle out the appropriate data from the 2898 // wide loads. 2899 unsigned J = 0; 2900 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2901 Instruction *Member = Group->getMember(I); 2902 2903 // Skip the gaps in the group. 2904 if (!Member) 2905 continue; 2906 2907 auto StrideMask = 2908 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2909 for (unsigned Part = 0; Part < UF; Part++) { 2910 Value *StridedVec = Builder.CreateShuffleVector( 2911 NewLoads[Part], StrideMask, "strided.vec"); 2912 2913 // If this member has different type, cast the result type. 2914 if (Member->getType() != ScalarTy) { 2915 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2916 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2917 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2918 } 2919 2920 if (Group->isReverse()) 2921 StridedVec = reverseVector(StridedVec); 2922 2923 State.set(VPDefs[J], StridedVec, Part); 2924 } 2925 ++J; 2926 } 2927 return; 2928 } 2929 2930 // The sub vector type for current instruction. 2931 auto *SubVT = VectorType::get(ScalarTy, VF); 2932 2933 // Vectorize the interleaved store group. 2934 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2935 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2936 "masked interleaved groups are not allowed."); 2937 assert((!MaskForGaps || !VF.isScalable()) && 2938 "masking gaps for scalable vectors is not yet supported."); 2939 for (unsigned Part = 0; Part < UF; Part++) { 2940 // Collect the stored vector from each member. 2941 SmallVector<Value *, 4> StoredVecs; 2942 for (unsigned i = 0; i < InterleaveFactor; i++) { 2943 assert((Group->getMember(i) || MaskForGaps) && 2944 "Fail to get a member from an interleaved store group"); 2945 Instruction *Member = Group->getMember(i); 2946 2947 // Skip the gaps in the group. 2948 if (!Member) { 2949 Value *Undef = PoisonValue::get(SubVT); 2950 StoredVecs.push_back(Undef); 2951 continue; 2952 } 2953 2954 Value *StoredVec = State.get(StoredValues[i], Part); 2955 2956 if (Group->isReverse()) 2957 StoredVec = reverseVector(StoredVec); 2958 2959 // If this member has different type, cast it to a unified type. 2960 2961 if (StoredVec->getType() != SubVT) 2962 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2963 2964 StoredVecs.push_back(StoredVec); 2965 } 2966 2967 // Concatenate all vectors into a wide vector. 2968 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2969 2970 // Interleave the elements in the wide vector. 2971 Value *IVec = Builder.CreateShuffleVector( 2972 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2973 "interleaved.vec"); 2974 2975 Instruction *NewStoreInstr; 2976 if (BlockInMask || MaskForGaps) { 2977 Value *GroupMask = MaskForGaps; 2978 if (BlockInMask) { 2979 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2980 Value *ShuffledMask = Builder.CreateShuffleVector( 2981 BlockInMaskPart, 2982 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2983 "interleaved.mask"); 2984 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2985 ShuffledMask, MaskForGaps) 2986 : ShuffledMask; 2987 } 2988 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2989 Group->getAlign(), GroupMask); 2990 } else 2991 NewStoreInstr = 2992 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2993 2994 Group->addMetadata(NewStoreInstr); 2995 } 2996 } 2997 2998 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2999 VPReplicateRecipe *RepRecipe, 3000 const VPIteration &Instance, 3001 bool IfPredicateInstr, 3002 VPTransformState &State) { 3003 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3004 3005 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3006 // the first lane and part. 3007 if (isa<NoAliasScopeDeclInst>(Instr)) 3008 if (!Instance.isFirstIteration()) 3009 return; 3010 3011 setDebugLocFromInst(Instr); 3012 3013 // Does this instruction return a value ? 3014 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3015 3016 Instruction *Cloned = Instr->clone(); 3017 if (!IsVoidRetTy) 3018 Cloned->setName(Instr->getName() + ".cloned"); 3019 3020 // If the scalarized instruction contributes to the address computation of a 3021 // widen masked load/store which was in a basic block that needed predication 3022 // and is not predicated after vectorization, we can't propagate 3023 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 3024 // instruction could feed a poison value to the base address of the widen 3025 // load/store. 3026 if (State.MayGeneratePoisonRecipes.count(RepRecipe) > 0) 3027 Cloned->dropPoisonGeneratingFlags(); 3028 3029 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3030 Builder.GetInsertPoint()); 3031 // Replace the operands of the cloned instructions with their scalar 3032 // equivalents in the new loop. 3033 for (auto &I : enumerate(RepRecipe->operands())) { 3034 auto InputInstance = Instance; 3035 VPValue *Operand = I.value(); 3036 if (State.Plan->isUniformAfterVectorization(Operand)) 3037 InputInstance.Lane = VPLane::getFirstLane(); 3038 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 3039 } 3040 addNewMetadata(Cloned, Instr); 3041 3042 // Place the cloned scalar in the new loop. 3043 Builder.Insert(Cloned); 3044 3045 State.set(RepRecipe, Cloned, Instance); 3046 3047 // If we just cloned a new assumption, add it the assumption cache. 3048 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3049 AC->registerAssumption(II); 3050 3051 // End if-block. 3052 if (IfPredicateInstr) 3053 PredicatedInstructions.push_back(Cloned); 3054 } 3055 3056 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3057 Value *End, Value *Step, 3058 Instruction *DL) { 3059 BasicBlock *Header = L->getHeader(); 3060 BasicBlock *Latch = L->getLoopLatch(); 3061 // As we're just creating this loop, it's possible no latch exists 3062 // yet. If so, use the header as this will be a single block loop. 3063 if (!Latch) 3064 Latch = Header; 3065 3066 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3067 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3068 setDebugLocFromInst(OldInst, &B); 3069 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3070 3071 B.SetInsertPoint(Latch->getTerminator()); 3072 setDebugLocFromInst(OldInst, &B); 3073 3074 // Create i+1 and fill the PHINode. 3075 // 3076 // If the tail is not folded, we know that End - Start >= Step (either 3077 // statically or through the minimum iteration checks). We also know that both 3078 // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + 3079 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3080 // overflows and we can mark the induction increment as NUW. 3081 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3082 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3083 Induction->addIncoming(Start, L->getLoopPreheader()); 3084 Induction->addIncoming(Next, Latch); 3085 // Create the compare. 3086 Value *ICmp = B.CreateICmpEQ(Next, End); 3087 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3088 3089 // Now we have two terminators. Remove the old one from the block. 3090 Latch->getTerminator()->eraseFromParent(); 3091 3092 return Induction; 3093 } 3094 3095 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3096 if (TripCount) 3097 return TripCount; 3098 3099 assert(L && "Create Trip Count for null loop."); 3100 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3101 // Find the loop boundaries. 3102 ScalarEvolution *SE = PSE.getSE(); 3103 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3104 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3105 "Invalid loop count"); 3106 3107 Type *IdxTy = Legal->getWidestInductionType(); 3108 assert(IdxTy && "No type for induction"); 3109 3110 // The exit count might have the type of i64 while the phi is i32. This can 3111 // happen if we have an induction variable that is sign extended before the 3112 // compare. The only way that we get a backedge taken count is that the 3113 // induction variable was signed and as such will not overflow. In such a case 3114 // truncation is legal. 3115 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3116 IdxTy->getPrimitiveSizeInBits()) 3117 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3118 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3119 3120 // Get the total trip count from the count by adding 1. 3121 const SCEV *ExitCount = SE->getAddExpr( 3122 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3123 3124 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3125 3126 // Expand the trip count and place the new instructions in the preheader. 3127 // Notice that the pre-header does not change, only the loop body. 3128 SCEVExpander Exp(*SE, DL, "induction"); 3129 3130 // Count holds the overall loop count (N). 3131 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3132 L->getLoopPreheader()->getTerminator()); 3133 3134 if (TripCount->getType()->isPointerTy()) 3135 TripCount = 3136 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3137 L->getLoopPreheader()->getTerminator()); 3138 3139 return TripCount; 3140 } 3141 3142 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3143 if (VectorTripCount) 3144 return VectorTripCount; 3145 3146 Value *TC = getOrCreateTripCount(L); 3147 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3148 3149 Type *Ty = TC->getType(); 3150 // This is where we can make the step a runtime constant. 3151 Value *Step = createStepForVF(Builder, Ty, VF, UF); 3152 3153 // If the tail is to be folded by masking, round the number of iterations N 3154 // up to a multiple of Step instead of rounding down. This is done by first 3155 // adding Step-1 and then rounding down. Note that it's ok if this addition 3156 // overflows: the vector induction variable will eventually wrap to zero given 3157 // that it starts at zero and its Step is a power of two; the loop will then 3158 // exit, with the last early-exit vector comparison also producing all-true. 3159 if (Cost->foldTailByMasking()) { 3160 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3161 "VF*UF must be a power of 2 when folding tail by masking"); 3162 assert(!VF.isScalable() && 3163 "Tail folding not yet supported for scalable vectors"); 3164 TC = Builder.CreateAdd( 3165 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3166 } 3167 3168 // Now we need to generate the expression for the part of the loop that the 3169 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3170 // iterations are not required for correctness, or N - Step, otherwise. Step 3171 // is equal to the vectorization factor (number of SIMD elements) times the 3172 // unroll factor (number of SIMD instructions). 3173 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3174 3175 // There are cases where we *must* run at least one iteration in the remainder 3176 // loop. See the cost model for when this can happen. If the step evenly 3177 // divides the trip count, we set the remainder to be equal to the step. If 3178 // the step does not evenly divide the trip count, no adjustment is necessary 3179 // since there will already be scalar iterations. Note that the minimum 3180 // iterations check ensures that N >= Step. 3181 if (Cost->requiresScalarEpilogue(VF)) { 3182 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3183 R = Builder.CreateSelect(IsZero, Step, R); 3184 } 3185 3186 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3187 3188 return VectorTripCount; 3189 } 3190 3191 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3192 const DataLayout &DL) { 3193 // Verify that V is a vector type with same number of elements as DstVTy. 3194 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3195 unsigned VF = DstFVTy->getNumElements(); 3196 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3197 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3198 Type *SrcElemTy = SrcVecTy->getElementType(); 3199 Type *DstElemTy = DstFVTy->getElementType(); 3200 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3201 "Vector elements must have same size"); 3202 3203 // Do a direct cast if element types are castable. 3204 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3205 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3206 } 3207 // V cannot be directly casted to desired vector type. 3208 // May happen when V is a floating point vector but DstVTy is a vector of 3209 // pointers or vice-versa. Handle this using a two-step bitcast using an 3210 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3211 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3212 "Only one type should be a pointer type"); 3213 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3214 "Only one type should be a floating point type"); 3215 Type *IntTy = 3216 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3217 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3218 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3219 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3220 } 3221 3222 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3223 BasicBlock *Bypass) { 3224 Value *Count = getOrCreateTripCount(L); 3225 // Reuse existing vector loop preheader for TC checks. 3226 // Note that new preheader block is generated for vector loop. 3227 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3228 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3229 3230 // Generate code to check if the loop's trip count is less than VF * UF, or 3231 // equal to it in case a scalar epilogue is required; this implies that the 3232 // vector trip count is zero. This check also covers the case where adding one 3233 // to the backedge-taken count overflowed leading to an incorrect trip count 3234 // of zero. In this case we will also jump to the scalar loop. 3235 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3236 : ICmpInst::ICMP_ULT; 3237 3238 // If tail is to be folded, vector loop takes care of all iterations. 3239 Value *CheckMinIters = Builder.getFalse(); 3240 if (!Cost->foldTailByMasking()) { 3241 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3242 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3243 } 3244 // Create new preheader for vector loop. 3245 LoopVectorPreHeader = 3246 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3247 "vector.ph"); 3248 3249 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3250 DT->getNode(Bypass)->getIDom()) && 3251 "TC check is expected to dominate Bypass"); 3252 3253 // Update dominator for Bypass & LoopExit (if needed). 3254 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3255 if (!Cost->requiresScalarEpilogue(VF)) 3256 // If there is an epilogue which must run, there's no edge from the 3257 // middle block to exit blocks and thus no need to update the immediate 3258 // dominator of the exit blocks. 3259 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3260 3261 ReplaceInstWithInst( 3262 TCCheckBlock->getTerminator(), 3263 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3264 LoopBypassBlocks.push_back(TCCheckBlock); 3265 } 3266 3267 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3268 3269 BasicBlock *const SCEVCheckBlock = 3270 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3271 if (!SCEVCheckBlock) 3272 return nullptr; 3273 3274 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3275 (OptForSizeBasedOnProfile && 3276 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3277 "Cannot SCEV check stride or overflow when optimizing for size"); 3278 3279 3280 // Update dominator only if this is first RT check. 3281 if (LoopBypassBlocks.empty()) { 3282 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3283 if (!Cost->requiresScalarEpilogue(VF)) 3284 // If there is an epilogue which must run, there's no edge from the 3285 // middle block to exit blocks and thus no need to update the immediate 3286 // dominator of the exit blocks. 3287 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3288 } 3289 3290 LoopBypassBlocks.push_back(SCEVCheckBlock); 3291 AddedSafetyChecks = true; 3292 return SCEVCheckBlock; 3293 } 3294 3295 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3296 BasicBlock *Bypass) { 3297 // VPlan-native path does not do any analysis for runtime checks currently. 3298 if (EnableVPlanNativePath) 3299 return nullptr; 3300 3301 BasicBlock *const MemCheckBlock = 3302 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3303 3304 // Check if we generated code that checks in runtime if arrays overlap. We put 3305 // the checks into a separate block to make the more common case of few 3306 // elements faster. 3307 if (!MemCheckBlock) 3308 return nullptr; 3309 3310 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3311 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3312 "Cannot emit memory checks when optimizing for size, unless forced " 3313 "to vectorize."); 3314 ORE->emit([&]() { 3315 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3316 L->getStartLoc(), L->getHeader()) 3317 << "Code-size may be reduced by not forcing " 3318 "vectorization, or by source-code modifications " 3319 "eliminating the need for runtime checks " 3320 "(e.g., adding 'restrict')."; 3321 }); 3322 } 3323 3324 LoopBypassBlocks.push_back(MemCheckBlock); 3325 3326 AddedSafetyChecks = true; 3327 3328 // We currently don't use LoopVersioning for the actual loop cloning but we 3329 // still use it to add the noalias metadata. 3330 LVer = std::make_unique<LoopVersioning>( 3331 *Legal->getLAI(), 3332 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3333 DT, PSE.getSE()); 3334 LVer->prepareNoAliasMetadata(); 3335 return MemCheckBlock; 3336 } 3337 3338 Value *InnerLoopVectorizer::emitTransformedIndex( 3339 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3340 const InductionDescriptor &ID) const { 3341 3342 SCEVExpander Exp(*SE, DL, "induction"); 3343 auto Step = ID.getStep(); 3344 auto StartValue = ID.getStartValue(); 3345 assert(Index->getType()->getScalarType() == Step->getType() && 3346 "Index scalar type does not match StepValue type"); 3347 3348 // Note: the IR at this point is broken. We cannot use SE to create any new 3349 // SCEV and then expand it, hoping that SCEV's simplification will give us 3350 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3351 // lead to various SCEV crashes. So all we can do is to use builder and rely 3352 // on InstCombine for future simplifications. Here we handle some trivial 3353 // cases only. 3354 auto CreateAdd = [&B](Value *X, Value *Y) { 3355 assert(X->getType() == Y->getType() && "Types don't match!"); 3356 if (auto *CX = dyn_cast<ConstantInt>(X)) 3357 if (CX->isZero()) 3358 return Y; 3359 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3360 if (CY->isZero()) 3361 return X; 3362 return B.CreateAdd(X, Y); 3363 }; 3364 3365 // We allow X to be a vector type, in which case Y will potentially be 3366 // splatted into a vector with the same element count. 3367 auto CreateMul = [&B](Value *X, Value *Y) { 3368 assert(X->getType()->getScalarType() == Y->getType() && 3369 "Types don't match!"); 3370 if (auto *CX = dyn_cast<ConstantInt>(X)) 3371 if (CX->isOne()) 3372 return Y; 3373 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3374 if (CY->isOne()) 3375 return X; 3376 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3377 if (XVTy && !isa<VectorType>(Y->getType())) 3378 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3379 return B.CreateMul(X, Y); 3380 }; 3381 3382 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3383 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3384 // the DomTree is not kept up-to-date for additional blocks generated in the 3385 // vector loop. By using the header as insertion point, we guarantee that the 3386 // expanded instructions dominate all their uses. 3387 auto GetInsertPoint = [this, &B]() { 3388 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3389 if (InsertBB != LoopVectorBody && 3390 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3391 return LoopVectorBody->getTerminator(); 3392 return &*B.GetInsertPoint(); 3393 }; 3394 3395 switch (ID.getKind()) { 3396 case InductionDescriptor::IK_IntInduction: { 3397 assert(!isa<VectorType>(Index->getType()) && 3398 "Vector indices not supported for integer inductions yet"); 3399 assert(Index->getType() == StartValue->getType() && 3400 "Index type does not match StartValue type"); 3401 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3402 return B.CreateSub(StartValue, Index); 3403 auto *Offset = CreateMul( 3404 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3405 return CreateAdd(StartValue, Offset); 3406 } 3407 case InductionDescriptor::IK_PtrInduction: { 3408 assert(isa<SCEVConstant>(Step) && 3409 "Expected constant step for pointer induction"); 3410 return B.CreateGEP( 3411 ID.getElementType(), StartValue, 3412 CreateMul(Index, 3413 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3414 GetInsertPoint()))); 3415 } 3416 case InductionDescriptor::IK_FpInduction: { 3417 assert(!isa<VectorType>(Index->getType()) && 3418 "Vector indices not supported for FP inductions yet"); 3419 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3420 auto InductionBinOp = ID.getInductionBinOp(); 3421 assert(InductionBinOp && 3422 (InductionBinOp->getOpcode() == Instruction::FAdd || 3423 InductionBinOp->getOpcode() == Instruction::FSub) && 3424 "Original bin op should be defined for FP induction"); 3425 3426 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3427 Value *MulExp = B.CreateFMul(StepValue, Index); 3428 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3429 "induction"); 3430 } 3431 case InductionDescriptor::IK_NoInduction: 3432 return nullptr; 3433 } 3434 llvm_unreachable("invalid enum"); 3435 } 3436 3437 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3438 LoopScalarBody = OrigLoop->getHeader(); 3439 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3440 assert(LoopVectorPreHeader && "Invalid loop structure"); 3441 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3442 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3443 "multiple exit loop without required epilogue?"); 3444 3445 LoopMiddleBlock = 3446 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3447 LI, nullptr, Twine(Prefix) + "middle.block"); 3448 LoopScalarPreHeader = 3449 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3450 nullptr, Twine(Prefix) + "scalar.ph"); 3451 3452 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3453 3454 // Set up the middle block terminator. Two cases: 3455 // 1) If we know that we must execute the scalar epilogue, emit an 3456 // unconditional branch. 3457 // 2) Otherwise, we must have a single unique exit block (due to how we 3458 // implement the multiple exit case). In this case, set up a conditonal 3459 // branch from the middle block to the loop scalar preheader, and the 3460 // exit block. completeLoopSkeleton will update the condition to use an 3461 // iteration check, if required to decide whether to execute the remainder. 3462 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 3463 BranchInst::Create(LoopScalarPreHeader) : 3464 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3465 Builder.getTrue()); 3466 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3467 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3468 3469 // We intentionally don't let SplitBlock to update LoopInfo since 3470 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3471 // LoopVectorBody is explicitly added to the correct place few lines later. 3472 LoopVectorBody = 3473 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3474 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3475 3476 // Update dominator for loop exit. 3477 if (!Cost->requiresScalarEpilogue(VF)) 3478 // If there is an epilogue which must run, there's no edge from the 3479 // middle block to exit blocks and thus no need to update the immediate 3480 // dominator of the exit blocks. 3481 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3482 3483 // Create and register the new vector loop. 3484 Loop *Lp = LI->AllocateLoop(); 3485 Loop *ParentLoop = OrigLoop->getParentLoop(); 3486 3487 // Insert the new loop into the loop nest and register the new basic blocks 3488 // before calling any utilities such as SCEV that require valid LoopInfo. 3489 if (ParentLoop) { 3490 ParentLoop->addChildLoop(Lp); 3491 } else { 3492 LI->addTopLevelLoop(Lp); 3493 } 3494 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3495 return Lp; 3496 } 3497 3498 void InnerLoopVectorizer::createInductionResumeValues( 3499 Loop *L, Value *VectorTripCount, 3500 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3501 assert(VectorTripCount && L && "Expected valid arguments"); 3502 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3503 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3504 "Inconsistent information about additional bypass."); 3505 // We are going to resume the execution of the scalar loop. 3506 // Go over all of the induction variables that we found and fix the 3507 // PHIs that are left in the scalar version of the loop. 3508 // The starting values of PHI nodes depend on the counter of the last 3509 // iteration in the vectorized loop. 3510 // If we come from a bypass edge then we need to start from the original 3511 // start value. 3512 for (auto &InductionEntry : Legal->getInductionVars()) { 3513 PHINode *OrigPhi = InductionEntry.first; 3514 InductionDescriptor II = InductionEntry.second; 3515 3516 // Create phi nodes to merge from the backedge-taken check block. 3517 PHINode *BCResumeVal = 3518 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3519 LoopScalarPreHeader->getTerminator()); 3520 // Copy original phi DL over to the new one. 3521 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3522 Value *&EndValue = IVEndValues[OrigPhi]; 3523 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3524 if (OrigPhi == OldInduction) { 3525 // We know what the end value is. 3526 EndValue = VectorTripCount; 3527 } else { 3528 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3529 3530 // Fast-math-flags propagate from the original induction instruction. 3531 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3532 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3533 3534 Type *StepType = II.getStep()->getType(); 3535 Instruction::CastOps CastOp = 3536 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3537 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3538 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3539 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3540 EndValue->setName("ind.end"); 3541 3542 // Compute the end value for the additional bypass (if applicable). 3543 if (AdditionalBypass.first) { 3544 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3545 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3546 StepType, true); 3547 CRD = 3548 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3549 EndValueFromAdditionalBypass = 3550 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3551 EndValueFromAdditionalBypass->setName("ind.end"); 3552 } 3553 } 3554 // The new PHI merges the original incoming value, in case of a bypass, 3555 // or the value at the end of the vectorized loop. 3556 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3557 3558 // Fix the scalar body counter (PHI node). 3559 // The old induction's phi node in the scalar body needs the truncated 3560 // value. 3561 for (BasicBlock *BB : LoopBypassBlocks) 3562 BCResumeVal->addIncoming(II.getStartValue(), BB); 3563 3564 if (AdditionalBypass.first) 3565 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3566 EndValueFromAdditionalBypass); 3567 3568 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3569 } 3570 } 3571 3572 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3573 MDNode *OrigLoopID) { 3574 assert(L && "Expected valid loop."); 3575 3576 // The trip counts should be cached by now. 3577 Value *Count = getOrCreateTripCount(L); 3578 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3579 3580 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3581 3582 // Add a check in the middle block to see if we have completed 3583 // all of the iterations in the first vector loop. Three cases: 3584 // 1) If we require a scalar epilogue, there is no conditional branch as 3585 // we unconditionally branch to the scalar preheader. Do nothing. 3586 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3587 // Thus if tail is to be folded, we know we don't need to run the 3588 // remainder and we can use the previous value for the condition (true). 3589 // 3) Otherwise, construct a runtime check. 3590 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3591 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3592 Count, VectorTripCount, "cmp.n", 3593 LoopMiddleBlock->getTerminator()); 3594 3595 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3596 // of the corresponding compare because they may have ended up with 3597 // different line numbers and we want to avoid awkward line stepping while 3598 // debugging. Eg. if the compare has got a line number inside the loop. 3599 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3600 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3601 } 3602 3603 // Get ready to start creating new instructions into the vectorized body. 3604 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3605 "Inconsistent vector loop preheader"); 3606 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3607 3608 Optional<MDNode *> VectorizedLoopID = 3609 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3610 LLVMLoopVectorizeFollowupVectorized}); 3611 if (VectorizedLoopID.hasValue()) { 3612 L->setLoopID(VectorizedLoopID.getValue()); 3613 3614 // Do not setAlreadyVectorized if loop attributes have been defined 3615 // explicitly. 3616 return LoopVectorPreHeader; 3617 } 3618 3619 // Keep all loop hints from the original loop on the vector loop (we'll 3620 // replace the vectorizer-specific hints below). 3621 if (MDNode *LID = OrigLoop->getLoopID()) 3622 L->setLoopID(LID); 3623 3624 LoopVectorizeHints Hints(L, true, *ORE); 3625 Hints.setAlreadyVectorized(); 3626 3627 #ifdef EXPENSIVE_CHECKS 3628 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3629 LI->verify(*DT); 3630 #endif 3631 3632 return LoopVectorPreHeader; 3633 } 3634 3635 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3636 /* 3637 In this function we generate a new loop. The new loop will contain 3638 the vectorized instructions while the old loop will continue to run the 3639 scalar remainder. 3640 3641 [ ] <-- loop iteration number check. 3642 / | 3643 / v 3644 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3645 | / | 3646 | / v 3647 || [ ] <-- vector pre header. 3648 |/ | 3649 | v 3650 | [ ] \ 3651 | [ ]_| <-- vector loop. 3652 | | 3653 | v 3654 \ -[ ] <--- middle-block. 3655 \/ | 3656 /\ v 3657 | ->[ ] <--- new preheader. 3658 | | 3659 (opt) v <-- edge from middle to exit iff epilogue is not required. 3660 | [ ] \ 3661 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3662 \ | 3663 \ v 3664 >[ ] <-- exit block(s). 3665 ... 3666 */ 3667 3668 // Get the metadata of the original loop before it gets modified. 3669 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3670 3671 // Workaround! Compute the trip count of the original loop and cache it 3672 // before we start modifying the CFG. This code has a systemic problem 3673 // wherein it tries to run analysis over partially constructed IR; this is 3674 // wrong, and not simply for SCEV. The trip count of the original loop 3675 // simply happens to be prone to hitting this in practice. In theory, we 3676 // can hit the same issue for any SCEV, or ValueTracking query done during 3677 // mutation. See PR49900. 3678 getOrCreateTripCount(OrigLoop); 3679 3680 // Create an empty vector loop, and prepare basic blocks for the runtime 3681 // checks. 3682 Loop *Lp = createVectorLoopSkeleton(""); 3683 3684 // Now, compare the new count to zero. If it is zero skip the vector loop and 3685 // jump to the scalar loop. This check also covers the case where the 3686 // backedge-taken count is uint##_max: adding one to it will overflow leading 3687 // to an incorrect trip count of zero. In this (rare) case we will also jump 3688 // to the scalar loop. 3689 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3690 3691 // Generate the code to check any assumptions that we've made for SCEV 3692 // expressions. 3693 emitSCEVChecks(Lp, LoopScalarPreHeader); 3694 3695 // Generate the code that checks in runtime if arrays overlap. We put the 3696 // checks into a separate block to make the more common case of few elements 3697 // faster. 3698 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3699 3700 // Some loops have a single integer induction variable, while other loops 3701 // don't. One example is c++ iterators that often have multiple pointer 3702 // induction variables. In the code below we also support a case where we 3703 // don't have a single induction variable. 3704 // 3705 // We try to obtain an induction variable from the original loop as hard 3706 // as possible. However if we don't find one that: 3707 // - is an integer 3708 // - counts from zero, stepping by one 3709 // - is the size of the widest induction variable type 3710 // then we create a new one. 3711 OldInduction = Legal->getPrimaryInduction(); 3712 Type *IdxTy = Legal->getWidestInductionType(); 3713 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3714 // The loop step is equal to the vectorization factor (num of SIMD elements) 3715 // times the unroll factor (num of SIMD instructions). 3716 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3717 Value *Step = createStepForVF(Builder, IdxTy, VF, UF); 3718 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3719 Induction = 3720 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3721 getDebugLocFromInstOrOperands(OldInduction)); 3722 3723 // Emit phis for the new starting index of the scalar loop. 3724 createInductionResumeValues(Lp, CountRoundDown); 3725 3726 return completeLoopSkeleton(Lp, OrigLoopID); 3727 } 3728 3729 // Fix up external users of the induction variable. At this point, we are 3730 // in LCSSA form, with all external PHIs that use the IV having one input value, 3731 // coming from the remainder loop. We need those PHIs to also have a correct 3732 // value for the IV when arriving directly from the middle block. 3733 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3734 const InductionDescriptor &II, 3735 Value *CountRoundDown, Value *EndValue, 3736 BasicBlock *MiddleBlock) { 3737 // There are two kinds of external IV usages - those that use the value 3738 // computed in the last iteration (the PHI) and those that use the penultimate 3739 // value (the value that feeds into the phi from the loop latch). 3740 // We allow both, but they, obviously, have different values. 3741 3742 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3743 3744 DenseMap<Value *, Value *> MissingVals; 3745 3746 // An external user of the last iteration's value should see the value that 3747 // the remainder loop uses to initialize its own IV. 3748 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3749 for (User *U : PostInc->users()) { 3750 Instruction *UI = cast<Instruction>(U); 3751 if (!OrigLoop->contains(UI)) { 3752 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3753 MissingVals[UI] = EndValue; 3754 } 3755 } 3756 3757 // An external user of the penultimate value need to see EndValue - Step. 3758 // The simplest way to get this is to recompute it from the constituent SCEVs, 3759 // that is Start + (Step * (CRD - 1)). 3760 for (User *U : OrigPhi->users()) { 3761 auto *UI = cast<Instruction>(U); 3762 if (!OrigLoop->contains(UI)) { 3763 const DataLayout &DL = 3764 OrigLoop->getHeader()->getModule()->getDataLayout(); 3765 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3766 3767 IRBuilder<> B(MiddleBlock->getTerminator()); 3768 3769 // Fast-math-flags propagate from the original induction instruction. 3770 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3771 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3772 3773 Value *CountMinusOne = B.CreateSub( 3774 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3775 Value *CMO = 3776 !II.getStep()->getType()->isIntegerTy() 3777 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3778 II.getStep()->getType()) 3779 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3780 CMO->setName("cast.cmo"); 3781 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3782 Escape->setName("ind.escape"); 3783 MissingVals[UI] = Escape; 3784 } 3785 } 3786 3787 for (auto &I : MissingVals) { 3788 PHINode *PHI = cast<PHINode>(I.first); 3789 // One corner case we have to handle is two IVs "chasing" each-other, 3790 // that is %IV2 = phi [...], [ %IV1, %latch ] 3791 // In this case, if IV1 has an external use, we need to avoid adding both 3792 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3793 // don't already have an incoming value for the middle block. 3794 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3795 PHI->addIncoming(I.second, MiddleBlock); 3796 } 3797 } 3798 3799 namespace { 3800 3801 struct CSEDenseMapInfo { 3802 static bool canHandle(const Instruction *I) { 3803 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3804 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3805 } 3806 3807 static inline Instruction *getEmptyKey() { 3808 return DenseMapInfo<Instruction *>::getEmptyKey(); 3809 } 3810 3811 static inline Instruction *getTombstoneKey() { 3812 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3813 } 3814 3815 static unsigned getHashValue(const Instruction *I) { 3816 assert(canHandle(I) && "Unknown instruction!"); 3817 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3818 I->value_op_end())); 3819 } 3820 3821 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3822 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3823 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3824 return LHS == RHS; 3825 return LHS->isIdenticalTo(RHS); 3826 } 3827 }; 3828 3829 } // end anonymous namespace 3830 3831 ///Perform cse of induction variable instructions. 3832 static void cse(BasicBlock *BB) { 3833 // Perform simple cse. 3834 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3835 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3836 if (!CSEDenseMapInfo::canHandle(&In)) 3837 continue; 3838 3839 // Check if we can replace this instruction with any of the 3840 // visited instructions. 3841 if (Instruction *V = CSEMap.lookup(&In)) { 3842 In.replaceAllUsesWith(V); 3843 In.eraseFromParent(); 3844 continue; 3845 } 3846 3847 CSEMap[&In] = &In; 3848 } 3849 } 3850 3851 InstructionCost 3852 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3853 bool &NeedToScalarize) const { 3854 Function *F = CI->getCalledFunction(); 3855 Type *ScalarRetTy = CI->getType(); 3856 SmallVector<Type *, 4> Tys, ScalarTys; 3857 for (auto &ArgOp : CI->args()) 3858 ScalarTys.push_back(ArgOp->getType()); 3859 3860 // Estimate cost of scalarized vector call. The source operands are assumed 3861 // to be vectors, so we need to extract individual elements from there, 3862 // execute VF scalar calls, and then gather the result into the vector return 3863 // value. 3864 InstructionCost ScalarCallCost = 3865 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3866 if (VF.isScalar()) 3867 return ScalarCallCost; 3868 3869 // Compute corresponding vector type for return value and arguments. 3870 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3871 for (Type *ScalarTy : ScalarTys) 3872 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3873 3874 // Compute costs of unpacking argument values for the scalar calls and 3875 // packing the return values to a vector. 3876 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3877 3878 InstructionCost Cost = 3879 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3880 3881 // If we can't emit a vector call for this function, then the currently found 3882 // cost is the cost we need to return. 3883 NeedToScalarize = true; 3884 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3885 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3886 3887 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3888 return Cost; 3889 3890 // If the corresponding vector cost is cheaper, return its cost. 3891 InstructionCost VectorCallCost = 3892 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3893 if (VectorCallCost < Cost) { 3894 NeedToScalarize = false; 3895 Cost = VectorCallCost; 3896 } 3897 return Cost; 3898 } 3899 3900 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3901 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3902 return Elt; 3903 return VectorType::get(Elt, VF); 3904 } 3905 3906 InstructionCost 3907 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3908 ElementCount VF) const { 3909 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3910 assert(ID && "Expected intrinsic call!"); 3911 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3912 FastMathFlags FMF; 3913 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3914 FMF = FPMO->getFastMathFlags(); 3915 3916 SmallVector<const Value *> Arguments(CI->args()); 3917 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3918 SmallVector<Type *> ParamTys; 3919 std::transform(FTy->param_begin(), FTy->param_end(), 3920 std::back_inserter(ParamTys), 3921 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3922 3923 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3924 dyn_cast<IntrinsicInst>(CI)); 3925 return TTI.getIntrinsicInstrCost(CostAttrs, 3926 TargetTransformInfo::TCK_RecipThroughput); 3927 } 3928 3929 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3930 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3931 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3932 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3933 } 3934 3935 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3936 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3937 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3938 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3939 } 3940 3941 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3942 // For every instruction `I` in MinBWs, truncate the operands, create a 3943 // truncated version of `I` and reextend its result. InstCombine runs 3944 // later and will remove any ext/trunc pairs. 3945 SmallPtrSet<Value *, 4> Erased; 3946 for (const auto &KV : Cost->getMinimalBitwidths()) { 3947 // If the value wasn't vectorized, we must maintain the original scalar 3948 // type. The absence of the value from State indicates that it 3949 // wasn't vectorized. 3950 // FIXME: Should not rely on getVPValue at this point. 3951 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3952 if (!State.hasAnyVectorValue(Def)) 3953 continue; 3954 for (unsigned Part = 0; Part < UF; ++Part) { 3955 Value *I = State.get(Def, Part); 3956 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3957 continue; 3958 Type *OriginalTy = I->getType(); 3959 Type *ScalarTruncatedTy = 3960 IntegerType::get(OriginalTy->getContext(), KV.second); 3961 auto *TruncatedTy = VectorType::get( 3962 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3963 if (TruncatedTy == OriginalTy) 3964 continue; 3965 3966 IRBuilder<> B(cast<Instruction>(I)); 3967 auto ShrinkOperand = [&](Value *V) -> Value * { 3968 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3969 if (ZI->getSrcTy() == TruncatedTy) 3970 return ZI->getOperand(0); 3971 return B.CreateZExtOrTrunc(V, TruncatedTy); 3972 }; 3973 3974 // The actual instruction modification depends on the instruction type, 3975 // unfortunately. 3976 Value *NewI = nullptr; 3977 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3978 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3979 ShrinkOperand(BO->getOperand(1))); 3980 3981 // Any wrapping introduced by shrinking this operation shouldn't be 3982 // considered undefined behavior. So, we can't unconditionally copy 3983 // arithmetic wrapping flags to NewI. 3984 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3985 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3986 NewI = 3987 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3988 ShrinkOperand(CI->getOperand(1))); 3989 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3990 NewI = B.CreateSelect(SI->getCondition(), 3991 ShrinkOperand(SI->getTrueValue()), 3992 ShrinkOperand(SI->getFalseValue())); 3993 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3994 switch (CI->getOpcode()) { 3995 default: 3996 llvm_unreachable("Unhandled cast!"); 3997 case Instruction::Trunc: 3998 NewI = ShrinkOperand(CI->getOperand(0)); 3999 break; 4000 case Instruction::SExt: 4001 NewI = B.CreateSExtOrTrunc( 4002 CI->getOperand(0), 4003 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4004 break; 4005 case Instruction::ZExt: 4006 NewI = B.CreateZExtOrTrunc( 4007 CI->getOperand(0), 4008 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4009 break; 4010 } 4011 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4012 auto Elements0 = 4013 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4014 auto *O0 = B.CreateZExtOrTrunc( 4015 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4016 auto Elements1 = 4017 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4018 auto *O1 = B.CreateZExtOrTrunc( 4019 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4020 4021 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4022 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4023 // Don't do anything with the operands, just extend the result. 4024 continue; 4025 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4026 auto Elements = 4027 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4028 auto *O0 = B.CreateZExtOrTrunc( 4029 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4030 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4031 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4032 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4033 auto Elements = 4034 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4035 auto *O0 = B.CreateZExtOrTrunc( 4036 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4037 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4038 } else { 4039 // If we don't know what to do, be conservative and don't do anything. 4040 continue; 4041 } 4042 4043 // Lastly, extend the result. 4044 NewI->takeName(cast<Instruction>(I)); 4045 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4046 I->replaceAllUsesWith(Res); 4047 cast<Instruction>(I)->eraseFromParent(); 4048 Erased.insert(I); 4049 State.reset(Def, Res, Part); 4050 } 4051 } 4052 4053 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4054 for (const auto &KV : Cost->getMinimalBitwidths()) { 4055 // If the value wasn't vectorized, we must maintain the original scalar 4056 // type. The absence of the value from State indicates that it 4057 // wasn't vectorized. 4058 // FIXME: Should not rely on getVPValue at this point. 4059 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4060 if (!State.hasAnyVectorValue(Def)) 4061 continue; 4062 for (unsigned Part = 0; Part < UF; ++Part) { 4063 Value *I = State.get(Def, Part); 4064 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4065 if (Inst && Inst->use_empty()) { 4066 Value *NewI = Inst->getOperand(0); 4067 Inst->eraseFromParent(); 4068 State.reset(Def, NewI, Part); 4069 } 4070 } 4071 } 4072 } 4073 4074 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4075 // Insert truncates and extends for any truncated instructions as hints to 4076 // InstCombine. 4077 if (VF.isVector()) 4078 truncateToMinimalBitwidths(State); 4079 4080 // Fix widened non-induction PHIs by setting up the PHI operands. 4081 if (OrigPHIsToFix.size()) { 4082 assert(EnableVPlanNativePath && 4083 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4084 fixNonInductionPHIs(State); 4085 } 4086 4087 // At this point every instruction in the original loop is widened to a 4088 // vector form. Now we need to fix the recurrences in the loop. These PHI 4089 // nodes are currently empty because we did not want to introduce cycles. 4090 // This is the second stage of vectorizing recurrences. 4091 fixCrossIterationPHIs(State); 4092 4093 // Forget the original basic block. 4094 PSE.getSE()->forgetLoop(OrigLoop); 4095 4096 // If we inserted an edge from the middle block to the unique exit block, 4097 // update uses outside the loop (phis) to account for the newly inserted 4098 // edge. 4099 if (!Cost->requiresScalarEpilogue(VF)) { 4100 // Fix-up external users of the induction variables. 4101 for (auto &Entry : Legal->getInductionVars()) 4102 fixupIVUsers(Entry.first, Entry.second, 4103 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4104 IVEndValues[Entry.first], LoopMiddleBlock); 4105 4106 fixLCSSAPHIs(State); 4107 } 4108 4109 for (Instruction *PI : PredicatedInstructions) 4110 sinkScalarOperands(&*PI); 4111 4112 // Remove redundant induction instructions. 4113 cse(LoopVectorBody); 4114 4115 // Set/update profile weights for the vector and remainder loops as original 4116 // loop iterations are now distributed among them. Note that original loop 4117 // represented by LoopScalarBody becomes remainder loop after vectorization. 4118 // 4119 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 4120 // end up getting slightly roughened result but that should be OK since 4121 // profile is not inherently precise anyway. Note also possible bypass of 4122 // vector code caused by legality checks is ignored, assigning all the weight 4123 // to the vector loop, optimistically. 4124 // 4125 // For scalable vectorization we can't know at compile time how many iterations 4126 // of the loop are handled in one vector iteration, so instead assume a pessimistic 4127 // vscale of '1'. 4128 setProfileInfoAfterUnrolling( 4129 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 4130 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 4131 } 4132 4133 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 4134 // In order to support recurrences we need to be able to vectorize Phi nodes. 4135 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4136 // stage #2: We now need to fix the recurrences by adding incoming edges to 4137 // the currently empty PHI nodes. At this point every instruction in the 4138 // original loop is widened to a vector form so we can use them to construct 4139 // the incoming edges. 4140 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 4141 for (VPRecipeBase &R : Header->phis()) { 4142 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 4143 fixReduction(ReductionPhi, State); 4144 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 4145 fixFirstOrderRecurrence(FOR, State); 4146 } 4147 } 4148 4149 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, 4150 VPTransformState &State) { 4151 // This is the second phase of vectorizing first-order recurrences. An 4152 // overview of the transformation is described below. Suppose we have the 4153 // following loop. 4154 // 4155 // for (int i = 0; i < n; ++i) 4156 // b[i] = a[i] - a[i - 1]; 4157 // 4158 // There is a first-order recurrence on "a". For this loop, the shorthand 4159 // scalar IR looks like: 4160 // 4161 // scalar.ph: 4162 // s_init = a[-1] 4163 // br scalar.body 4164 // 4165 // scalar.body: 4166 // i = phi [0, scalar.ph], [i+1, scalar.body] 4167 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4168 // s2 = a[i] 4169 // b[i] = s2 - s1 4170 // br cond, scalar.body, ... 4171 // 4172 // In this example, s1 is a recurrence because it's value depends on the 4173 // previous iteration. In the first phase of vectorization, we created a 4174 // vector phi v1 for s1. We now complete the vectorization and produce the 4175 // shorthand vector IR shown below (for VF = 4, UF = 1). 4176 // 4177 // vector.ph: 4178 // v_init = vector(..., ..., ..., a[-1]) 4179 // br vector.body 4180 // 4181 // vector.body 4182 // i = phi [0, vector.ph], [i+4, vector.body] 4183 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4184 // v2 = a[i, i+1, i+2, i+3]; 4185 // v3 = vector(v1(3), v2(0, 1, 2)) 4186 // b[i, i+1, i+2, i+3] = v2 - v3 4187 // br cond, vector.body, middle.block 4188 // 4189 // middle.block: 4190 // x = v2(3) 4191 // br scalar.ph 4192 // 4193 // scalar.ph: 4194 // s_init = phi [x, middle.block], [a[-1], otherwise] 4195 // br scalar.body 4196 // 4197 // After execution completes the vector loop, we extract the next value of 4198 // the recurrence (x) to use as the initial value in the scalar loop. 4199 4200 // Extract the last vector element in the middle block. This will be the 4201 // initial value for the recurrence when jumping to the scalar loop. 4202 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4203 Value *Incoming = State.get(PreviousDef, UF - 1); 4204 auto *ExtractForScalar = Incoming; 4205 auto *IdxTy = Builder.getInt32Ty(); 4206 if (VF.isVector()) { 4207 auto *One = ConstantInt::get(IdxTy, 1); 4208 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4209 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4210 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4211 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4212 "vector.recur.extract"); 4213 } 4214 // Extract the second last element in the middle block if the 4215 // Phi is used outside the loop. We need to extract the phi itself 4216 // and not the last element (the phi update in the current iteration). This 4217 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4218 // when the scalar loop is not run at all. 4219 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4220 if (VF.isVector()) { 4221 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4222 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4223 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4224 Incoming, Idx, "vector.recur.extract.for.phi"); 4225 } else if (UF > 1) 4226 // When loop is unrolled without vectorizing, initialize 4227 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4228 // of `Incoming`. This is analogous to the vectorized case above: extracting 4229 // the second last element when VF > 1. 4230 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4231 4232 // Fix the initial value of the original recurrence in the scalar loop. 4233 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4234 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 4235 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4236 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 4237 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4238 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4239 Start->addIncoming(Incoming, BB); 4240 } 4241 4242 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4243 Phi->setName("scalar.recur"); 4244 4245 // Finally, fix users of the recurrence outside the loop. The users will need 4246 // either the last value of the scalar recurrence or the last value of the 4247 // vector recurrence we extracted in the middle block. Since the loop is in 4248 // LCSSA form, we just need to find all the phi nodes for the original scalar 4249 // recurrence in the exit block, and then add an edge for the middle block. 4250 // Note that LCSSA does not imply single entry when the original scalar loop 4251 // had multiple exiting edges (as we always run the last iteration in the 4252 // scalar epilogue); in that case, there is no edge from middle to exit and 4253 // and thus no phis which needed updated. 4254 if (!Cost->requiresScalarEpilogue(VF)) 4255 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4256 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) 4257 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4258 } 4259 4260 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, 4261 VPTransformState &State) { 4262 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 4263 // Get it's reduction variable descriptor. 4264 assert(Legal->isReductionVariable(OrigPhi) && 4265 "Unable to find the reduction variable"); 4266 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 4267 4268 RecurKind RK = RdxDesc.getRecurrenceKind(); 4269 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4270 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4271 setDebugLocFromInst(ReductionStartValue); 4272 4273 VPValue *LoopExitInstDef = PhiR->getBackedgeValue(); 4274 // This is the vector-clone of the value that leaves the loop. 4275 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 4276 4277 // Wrap flags are in general invalid after vectorization, clear them. 4278 clearReductionWrapFlags(RdxDesc, State); 4279 4280 // Before each round, move the insertion point right between 4281 // the PHIs and the values we are going to write. 4282 // This allows us to write both PHINodes and the extractelement 4283 // instructions. 4284 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4285 4286 setDebugLocFromInst(LoopExitInst); 4287 4288 Type *PhiTy = OrigPhi->getType(); 4289 // If tail is folded by masking, the vector value to leave the loop should be 4290 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4291 // instead of the former. For an inloop reduction the reduction will already 4292 // be predicated, and does not need to be handled here. 4293 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 4294 for (unsigned Part = 0; Part < UF; ++Part) { 4295 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4296 Value *Sel = nullptr; 4297 for (User *U : VecLoopExitInst->users()) { 4298 if (isa<SelectInst>(U)) { 4299 assert(!Sel && "Reduction exit feeding two selects"); 4300 Sel = U; 4301 } else 4302 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4303 } 4304 assert(Sel && "Reduction exit feeds no select"); 4305 State.reset(LoopExitInstDef, Sel, Part); 4306 4307 // If the target can create a predicated operator for the reduction at no 4308 // extra cost in the loop (for example a predicated vadd), it can be 4309 // cheaper for the select to remain in the loop than be sunk out of it, 4310 // and so use the select value for the phi instead of the old 4311 // LoopExitValue. 4312 if (PreferPredicatedReductionSelect || 4313 TTI->preferPredicatedReductionSelect( 4314 RdxDesc.getOpcode(), PhiTy, 4315 TargetTransformInfo::ReductionFlags())) { 4316 auto *VecRdxPhi = 4317 cast<PHINode>(State.get(PhiR, Part)); 4318 VecRdxPhi->setIncomingValueForBlock( 4319 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4320 } 4321 } 4322 } 4323 4324 // If the vector reduction can be performed in a smaller type, we truncate 4325 // then extend the loop exit value to enable InstCombine to evaluate the 4326 // entire expression in the smaller type. 4327 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4328 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4329 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4330 Builder.SetInsertPoint( 4331 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4332 VectorParts RdxParts(UF); 4333 for (unsigned Part = 0; Part < UF; ++Part) { 4334 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4335 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4336 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4337 : Builder.CreateZExt(Trunc, VecTy); 4338 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4339 if (U != Trunc) { 4340 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4341 RdxParts[Part] = Extnd; 4342 } 4343 } 4344 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4345 for (unsigned Part = 0; Part < UF; ++Part) { 4346 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4347 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4348 } 4349 } 4350 4351 // Reduce all of the unrolled parts into a single vector. 4352 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4353 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4354 4355 // The middle block terminator has already been assigned a DebugLoc here (the 4356 // OrigLoop's single latch terminator). We want the whole middle block to 4357 // appear to execute on this line because: (a) it is all compiler generated, 4358 // (b) these instructions are always executed after evaluating the latch 4359 // conditional branch, and (c) other passes may add new predecessors which 4360 // terminate on this line. This is the easiest way to ensure we don't 4361 // accidentally cause an extra step back into the loop while debugging. 4362 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4363 if (PhiR->isOrdered()) 4364 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4365 else { 4366 // Floating-point operations should have some FMF to enable the reduction. 4367 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4368 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4369 for (unsigned Part = 1; Part < UF; ++Part) { 4370 Value *RdxPart = State.get(LoopExitInstDef, Part); 4371 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4372 ReducedPartRdx = Builder.CreateBinOp( 4373 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4374 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 4375 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 4376 ReducedPartRdx, RdxPart); 4377 else 4378 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4379 } 4380 } 4381 4382 // Create the reduction after the loop. Note that inloop reductions create the 4383 // target reduction in the loop using a Reduction recipe. 4384 if (VF.isVector() && !PhiR->isInLoop()) { 4385 ReducedPartRdx = 4386 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 4387 // If the reduction can be performed in a smaller type, we need to extend 4388 // the reduction to the wider type before we branch to the original loop. 4389 if (PhiTy != RdxDesc.getRecurrenceType()) 4390 ReducedPartRdx = RdxDesc.isSigned() 4391 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4392 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4393 } 4394 4395 // Create a phi node that merges control-flow from the backedge-taken check 4396 // block and the middle block. 4397 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4398 LoopScalarPreHeader->getTerminator()); 4399 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4400 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4401 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4402 4403 // Now, we need to fix the users of the reduction variable 4404 // inside and outside of the scalar remainder loop. 4405 4406 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4407 // in the exit blocks. See comment on analogous loop in 4408 // fixFirstOrderRecurrence for a more complete explaination of the logic. 4409 if (!Cost->requiresScalarEpilogue(VF)) 4410 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4411 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4412 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4413 4414 // Fix the scalar loop reduction variable with the incoming reduction sum 4415 // from the vector body and from the backedge value. 4416 int IncomingEdgeBlockIdx = 4417 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4418 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4419 // Pick the other block. 4420 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4421 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4422 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4423 } 4424 4425 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4426 VPTransformState &State) { 4427 RecurKind RK = RdxDesc.getRecurrenceKind(); 4428 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4429 return; 4430 4431 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4432 assert(LoopExitInstr && "null loop exit instruction"); 4433 SmallVector<Instruction *, 8> Worklist; 4434 SmallPtrSet<Instruction *, 8> Visited; 4435 Worklist.push_back(LoopExitInstr); 4436 Visited.insert(LoopExitInstr); 4437 4438 while (!Worklist.empty()) { 4439 Instruction *Cur = Worklist.pop_back_val(); 4440 if (isa<OverflowingBinaryOperator>(Cur)) 4441 for (unsigned Part = 0; Part < UF; ++Part) { 4442 // FIXME: Should not rely on getVPValue at this point. 4443 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4444 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4445 } 4446 4447 for (User *U : Cur->users()) { 4448 Instruction *UI = cast<Instruction>(U); 4449 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4450 Visited.insert(UI).second) 4451 Worklist.push_back(UI); 4452 } 4453 } 4454 } 4455 4456 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4457 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4458 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4459 // Some phis were already hand updated by the reduction and recurrence 4460 // code above, leave them alone. 4461 continue; 4462 4463 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4464 // Non-instruction incoming values will have only one value. 4465 4466 VPLane Lane = VPLane::getFirstLane(); 4467 if (isa<Instruction>(IncomingValue) && 4468 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4469 VF)) 4470 Lane = VPLane::getLastLaneForVF(VF); 4471 4472 // Can be a loop invariant incoming value or the last scalar value to be 4473 // extracted from the vectorized loop. 4474 // FIXME: Should not rely on getVPValue at this point. 4475 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4476 Value *lastIncomingValue = 4477 OrigLoop->isLoopInvariant(IncomingValue) 4478 ? IncomingValue 4479 : State.get(State.Plan->getVPValue(IncomingValue, true), 4480 VPIteration(UF - 1, Lane)); 4481 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4482 } 4483 } 4484 4485 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4486 // The basic block and loop containing the predicated instruction. 4487 auto *PredBB = PredInst->getParent(); 4488 auto *VectorLoop = LI->getLoopFor(PredBB); 4489 4490 // Initialize a worklist with the operands of the predicated instruction. 4491 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4492 4493 // Holds instructions that we need to analyze again. An instruction may be 4494 // reanalyzed if we don't yet know if we can sink it or not. 4495 SmallVector<Instruction *, 8> InstsToReanalyze; 4496 4497 // Returns true if a given use occurs in the predicated block. Phi nodes use 4498 // their operands in their corresponding predecessor blocks. 4499 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4500 auto *I = cast<Instruction>(U.getUser()); 4501 BasicBlock *BB = I->getParent(); 4502 if (auto *Phi = dyn_cast<PHINode>(I)) 4503 BB = Phi->getIncomingBlock( 4504 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4505 return BB == PredBB; 4506 }; 4507 4508 // Iteratively sink the scalarized operands of the predicated instruction 4509 // into the block we created for it. When an instruction is sunk, it's 4510 // operands are then added to the worklist. The algorithm ends after one pass 4511 // through the worklist doesn't sink a single instruction. 4512 bool Changed; 4513 do { 4514 // Add the instructions that need to be reanalyzed to the worklist, and 4515 // reset the changed indicator. 4516 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4517 InstsToReanalyze.clear(); 4518 Changed = false; 4519 4520 while (!Worklist.empty()) { 4521 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4522 4523 // We can't sink an instruction if it is a phi node, is not in the loop, 4524 // or may have side effects. 4525 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4526 I->mayHaveSideEffects()) 4527 continue; 4528 4529 // If the instruction is already in PredBB, check if we can sink its 4530 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4531 // sinking the scalar instruction I, hence it appears in PredBB; but it 4532 // may have failed to sink I's operands (recursively), which we try 4533 // (again) here. 4534 if (I->getParent() == PredBB) { 4535 Worklist.insert(I->op_begin(), I->op_end()); 4536 continue; 4537 } 4538 4539 // It's legal to sink the instruction if all its uses occur in the 4540 // predicated block. Otherwise, there's nothing to do yet, and we may 4541 // need to reanalyze the instruction. 4542 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4543 InstsToReanalyze.push_back(I); 4544 continue; 4545 } 4546 4547 // Move the instruction to the beginning of the predicated block, and add 4548 // it's operands to the worklist. 4549 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4550 Worklist.insert(I->op_begin(), I->op_end()); 4551 4552 // The sinking may have enabled other instructions to be sunk, so we will 4553 // need to iterate. 4554 Changed = true; 4555 } 4556 } while (Changed); 4557 } 4558 4559 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4560 for (PHINode *OrigPhi : OrigPHIsToFix) { 4561 VPWidenPHIRecipe *VPPhi = 4562 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4563 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4564 // Make sure the builder has a valid insert point. 4565 Builder.SetInsertPoint(NewPhi); 4566 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4567 VPValue *Inc = VPPhi->getIncomingValue(i); 4568 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4569 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4570 } 4571 } 4572 } 4573 4574 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4575 return Cost->useOrderedReductions(RdxDesc); 4576 } 4577 4578 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4579 VPWidenPHIRecipe *PhiR, 4580 VPTransformState &State) { 4581 PHINode *P = cast<PHINode>(PN); 4582 if (EnableVPlanNativePath) { 4583 // Currently we enter here in the VPlan-native path for non-induction 4584 // PHIs where all control flow is uniform. We simply widen these PHIs. 4585 // Create a vector phi with no operands - the vector phi operands will be 4586 // set at the end of vector code generation. 4587 Type *VecTy = (State.VF.isScalar()) 4588 ? PN->getType() 4589 : VectorType::get(PN->getType(), State.VF); 4590 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4591 State.set(PhiR, VecPhi, 0); 4592 OrigPHIsToFix.push_back(P); 4593 4594 return; 4595 } 4596 4597 assert(PN->getParent() == OrigLoop->getHeader() && 4598 "Non-header phis should have been handled elsewhere"); 4599 4600 // In order to support recurrences we need to be able to vectorize Phi nodes. 4601 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4602 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4603 // this value when we vectorize all of the instructions that use the PHI. 4604 4605 assert(!Legal->isReductionVariable(P) && 4606 "reductions should be handled elsewhere"); 4607 4608 setDebugLocFromInst(P); 4609 4610 // This PHINode must be an induction variable. 4611 // Make sure that we know about it. 4612 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4613 4614 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4615 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4616 4617 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4618 // which can be found from the original scalar operations. 4619 switch (II.getKind()) { 4620 case InductionDescriptor::IK_NoInduction: 4621 llvm_unreachable("Unknown induction"); 4622 case InductionDescriptor::IK_IntInduction: 4623 case InductionDescriptor::IK_FpInduction: 4624 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4625 case InductionDescriptor::IK_PtrInduction: { 4626 // Handle the pointer induction variable case. 4627 assert(P->getType()->isPointerTy() && "Unexpected type."); 4628 4629 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4630 // This is the normalized GEP that starts counting at zero. 4631 Value *PtrInd = 4632 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4633 // Determine the number of scalars we need to generate for each unroll 4634 // iteration. If the instruction is uniform, we only need to generate the 4635 // first lane. Otherwise, we generate all VF values. 4636 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4637 assert((IsUniform || !State.VF.isScalable()) && 4638 "Cannot scalarize a scalable VF"); 4639 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 4640 4641 for (unsigned Part = 0; Part < UF; ++Part) { 4642 Value *PartStart = 4643 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4644 4645 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4646 Value *Idx = Builder.CreateAdd( 4647 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4648 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4649 Value *SclrGep = 4650 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4651 SclrGep->setName("next.gep"); 4652 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4653 } 4654 } 4655 return; 4656 } 4657 assert(isa<SCEVConstant>(II.getStep()) && 4658 "Induction step not a SCEV constant!"); 4659 Type *PhiType = II.getStep()->getType(); 4660 4661 // Build a pointer phi 4662 Value *ScalarStartValue = II.getStartValue(); 4663 Type *ScStValueType = ScalarStartValue->getType(); 4664 PHINode *NewPointerPhi = 4665 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4666 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4667 4668 // A pointer induction, performed by using a gep 4669 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4670 Instruction *InductionLoc = LoopLatch->getTerminator(); 4671 const SCEV *ScalarStep = II.getStep(); 4672 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4673 Value *ScalarStepValue = 4674 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4675 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4676 Value *NumUnrolledElems = 4677 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4678 Value *InductionGEP = GetElementPtrInst::Create( 4679 II.getElementType(), NewPointerPhi, 4680 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4681 InductionLoc); 4682 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4683 4684 // Create UF many actual address geps that use the pointer 4685 // phi as base and a vectorized version of the step value 4686 // (<step*0, ..., step*N>) as offset. 4687 for (unsigned Part = 0; Part < State.UF; ++Part) { 4688 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4689 Value *StartOffsetScalar = 4690 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4691 Value *StartOffset = 4692 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4693 // Create a vector of consecutive numbers from zero to VF. 4694 StartOffset = 4695 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4696 4697 Value *GEP = Builder.CreateGEP( 4698 II.getElementType(), NewPointerPhi, 4699 Builder.CreateMul( 4700 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4701 "vector.gep")); 4702 State.set(PhiR, GEP, Part); 4703 } 4704 } 4705 } 4706 } 4707 4708 /// A helper function for checking whether an integer division-related 4709 /// instruction may divide by zero (in which case it must be predicated if 4710 /// executed conditionally in the scalar code). 4711 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4712 /// Non-zero divisors that are non compile-time constants will not be 4713 /// converted into multiplication, so we will still end up scalarizing 4714 /// the division, but can do so w/o predication. 4715 static bool mayDivideByZero(Instruction &I) { 4716 assert((I.getOpcode() == Instruction::UDiv || 4717 I.getOpcode() == Instruction::SDiv || 4718 I.getOpcode() == Instruction::URem || 4719 I.getOpcode() == Instruction::SRem) && 4720 "Unexpected instruction"); 4721 Value *Divisor = I.getOperand(1); 4722 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4723 return !CInt || CInt->isZero(); 4724 } 4725 4726 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4727 VPUser &ArgOperands, 4728 VPTransformState &State) { 4729 assert(!isa<DbgInfoIntrinsic>(I) && 4730 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4731 setDebugLocFromInst(&I); 4732 4733 Module *M = I.getParent()->getParent()->getParent(); 4734 auto *CI = cast<CallInst>(&I); 4735 4736 SmallVector<Type *, 4> Tys; 4737 for (Value *ArgOperand : CI->args()) 4738 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4739 4740 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4741 4742 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4743 // version of the instruction. 4744 // Is it beneficial to perform intrinsic call compared to lib call? 4745 bool NeedToScalarize = false; 4746 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4747 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4748 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4749 assert((UseVectorIntrinsic || !NeedToScalarize) && 4750 "Instruction should be scalarized elsewhere."); 4751 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4752 "Either the intrinsic cost or vector call cost must be valid"); 4753 4754 for (unsigned Part = 0; Part < UF; ++Part) { 4755 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4756 SmallVector<Value *, 4> Args; 4757 for (auto &I : enumerate(ArgOperands.operands())) { 4758 // Some intrinsics have a scalar argument - don't replace it with a 4759 // vector. 4760 Value *Arg; 4761 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4762 Arg = State.get(I.value(), Part); 4763 else { 4764 Arg = State.get(I.value(), VPIteration(0, 0)); 4765 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4766 TysForDecl.push_back(Arg->getType()); 4767 } 4768 Args.push_back(Arg); 4769 } 4770 4771 Function *VectorF; 4772 if (UseVectorIntrinsic) { 4773 // Use vector version of the intrinsic. 4774 if (VF.isVector()) 4775 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4776 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4777 assert(VectorF && "Can't retrieve vector intrinsic."); 4778 } else { 4779 // Use vector version of the function call. 4780 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4781 #ifndef NDEBUG 4782 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4783 "Can't create vector function."); 4784 #endif 4785 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4786 } 4787 SmallVector<OperandBundleDef, 1> OpBundles; 4788 CI->getOperandBundlesAsDefs(OpBundles); 4789 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4790 4791 if (isa<FPMathOperator>(V)) 4792 V->copyFastMathFlags(CI); 4793 4794 State.set(Def, V, Part); 4795 addMetadata(V, &I); 4796 } 4797 } 4798 4799 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4800 // We should not collect Scalars more than once per VF. Right now, this 4801 // function is called from collectUniformsAndScalars(), which already does 4802 // this check. Collecting Scalars for VF=1 does not make any sense. 4803 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4804 "This function should not be visited twice for the same VF"); 4805 4806 SmallSetVector<Instruction *, 8> Worklist; 4807 4808 // These sets are used to seed the analysis with pointers used by memory 4809 // accesses that will remain scalar. 4810 SmallSetVector<Instruction *, 8> ScalarPtrs; 4811 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4812 auto *Latch = TheLoop->getLoopLatch(); 4813 4814 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4815 // The pointer operands of loads and stores will be scalar as long as the 4816 // memory access is not a gather or scatter operation. The value operand of a 4817 // store will remain scalar if the store is scalarized. 4818 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4819 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4820 assert(WideningDecision != CM_Unknown && 4821 "Widening decision should be ready at this moment"); 4822 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4823 if (Ptr == Store->getValueOperand()) 4824 return WideningDecision == CM_Scalarize; 4825 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4826 "Ptr is neither a value or pointer operand"); 4827 return WideningDecision != CM_GatherScatter; 4828 }; 4829 4830 // A helper that returns true if the given value is a bitcast or 4831 // getelementptr instruction contained in the loop. 4832 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4833 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4834 isa<GetElementPtrInst>(V)) && 4835 !TheLoop->isLoopInvariant(V); 4836 }; 4837 4838 // A helper that evaluates a memory access's use of a pointer. If the use will 4839 // be a scalar use and the pointer is only used by memory accesses, we place 4840 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4841 // PossibleNonScalarPtrs. 4842 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4843 // We only care about bitcast and getelementptr instructions contained in 4844 // the loop. 4845 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4846 return; 4847 4848 // If the pointer has already been identified as scalar (e.g., if it was 4849 // also identified as uniform), there's nothing to do. 4850 auto *I = cast<Instruction>(Ptr); 4851 if (Worklist.count(I)) 4852 return; 4853 4854 // If the use of the pointer will be a scalar use, and all users of the 4855 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4856 // place the pointer in PossibleNonScalarPtrs. 4857 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4858 return isa<LoadInst>(U) || isa<StoreInst>(U); 4859 })) 4860 ScalarPtrs.insert(I); 4861 else 4862 PossibleNonScalarPtrs.insert(I); 4863 }; 4864 4865 // We seed the scalars analysis with three classes of instructions: (1) 4866 // instructions marked uniform-after-vectorization and (2) bitcast, 4867 // getelementptr and (pointer) phi instructions used by memory accesses 4868 // requiring a scalar use. 4869 // 4870 // (1) Add to the worklist all instructions that have been identified as 4871 // uniform-after-vectorization. 4872 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4873 4874 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4875 // memory accesses requiring a scalar use. The pointer operands of loads and 4876 // stores will be scalar as long as the memory accesses is not a gather or 4877 // scatter operation. The value operand of a store will remain scalar if the 4878 // store is scalarized. 4879 for (auto *BB : TheLoop->blocks()) 4880 for (auto &I : *BB) { 4881 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4882 evaluatePtrUse(Load, Load->getPointerOperand()); 4883 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4884 evaluatePtrUse(Store, Store->getPointerOperand()); 4885 evaluatePtrUse(Store, Store->getValueOperand()); 4886 } 4887 } 4888 for (auto *I : ScalarPtrs) 4889 if (!PossibleNonScalarPtrs.count(I)) { 4890 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4891 Worklist.insert(I); 4892 } 4893 4894 // Insert the forced scalars. 4895 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4896 // induction variable when the PHI user is scalarized. 4897 auto ForcedScalar = ForcedScalars.find(VF); 4898 if (ForcedScalar != ForcedScalars.end()) 4899 for (auto *I : ForcedScalar->second) 4900 Worklist.insert(I); 4901 4902 // Expand the worklist by looking through any bitcasts and getelementptr 4903 // instructions we've already identified as scalar. This is similar to the 4904 // expansion step in collectLoopUniforms(); however, here we're only 4905 // expanding to include additional bitcasts and getelementptr instructions. 4906 unsigned Idx = 0; 4907 while (Idx != Worklist.size()) { 4908 Instruction *Dst = Worklist[Idx++]; 4909 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4910 continue; 4911 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4912 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4913 auto *J = cast<Instruction>(U); 4914 return !TheLoop->contains(J) || Worklist.count(J) || 4915 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4916 isScalarUse(J, Src)); 4917 })) { 4918 Worklist.insert(Src); 4919 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4920 } 4921 } 4922 4923 // An induction variable will remain scalar if all users of the induction 4924 // variable and induction variable update remain scalar. 4925 for (auto &Induction : Legal->getInductionVars()) { 4926 auto *Ind = Induction.first; 4927 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4928 4929 // If tail-folding is applied, the primary induction variable will be used 4930 // to feed a vector compare. 4931 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4932 continue; 4933 4934 // Returns true if \p Indvar is a pointer induction that is used directly by 4935 // load/store instruction \p I. 4936 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4937 Instruction *I) { 4938 return Induction.second.getKind() == 4939 InductionDescriptor::IK_PtrInduction && 4940 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4941 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4942 }; 4943 4944 // Determine if all users of the induction variable are scalar after 4945 // vectorization. 4946 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4947 auto *I = cast<Instruction>(U); 4948 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4949 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4950 }); 4951 if (!ScalarInd) 4952 continue; 4953 4954 // Determine if all users of the induction variable update instruction are 4955 // scalar after vectorization. 4956 auto ScalarIndUpdate = 4957 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4958 auto *I = cast<Instruction>(U); 4959 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4960 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4961 }); 4962 if (!ScalarIndUpdate) 4963 continue; 4964 4965 // The induction variable and its update instruction will remain scalar. 4966 Worklist.insert(Ind); 4967 Worklist.insert(IndUpdate); 4968 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4969 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4970 << "\n"); 4971 } 4972 4973 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4974 } 4975 4976 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 4977 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4978 return false; 4979 switch(I->getOpcode()) { 4980 default: 4981 break; 4982 case Instruction::Load: 4983 case Instruction::Store: { 4984 if (!Legal->isMaskRequired(I)) 4985 return false; 4986 auto *Ptr = getLoadStorePointerOperand(I); 4987 auto *Ty = getLoadStoreType(I); 4988 const Align Alignment = getLoadStoreAlignment(I); 4989 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4990 TTI.isLegalMaskedGather(Ty, Alignment)) 4991 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4992 TTI.isLegalMaskedScatter(Ty, Alignment)); 4993 } 4994 case Instruction::UDiv: 4995 case Instruction::SDiv: 4996 case Instruction::SRem: 4997 case Instruction::URem: 4998 return mayDivideByZero(*I); 4999 } 5000 return false; 5001 } 5002 5003 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5004 Instruction *I, ElementCount VF) { 5005 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5006 assert(getWideningDecision(I, VF) == CM_Unknown && 5007 "Decision should not be set yet."); 5008 auto *Group = getInterleavedAccessGroup(I); 5009 assert(Group && "Must have a group."); 5010 5011 // If the instruction's allocated size doesn't equal it's type size, it 5012 // requires padding and will be scalarized. 5013 auto &DL = I->getModule()->getDataLayout(); 5014 auto *ScalarTy = getLoadStoreType(I); 5015 if (hasIrregularType(ScalarTy, DL)) 5016 return false; 5017 5018 // Check if masking is required. 5019 // A Group may need masking for one of two reasons: it resides in a block that 5020 // needs predication, or it was decided to use masking to deal with gaps 5021 // (either a gap at the end of a load-access that may result in a speculative 5022 // load, or any gaps in a store-access). 5023 bool PredicatedAccessRequiresMasking = 5024 blockNeedsPredicationForAnyReason(I->getParent()) && 5025 Legal->isMaskRequired(I); 5026 bool LoadAccessWithGapsRequiresEpilogMasking = 5027 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 5028 !isScalarEpilogueAllowed(); 5029 bool StoreAccessWithGapsRequiresMasking = 5030 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 5031 if (!PredicatedAccessRequiresMasking && 5032 !LoadAccessWithGapsRequiresEpilogMasking && 5033 !StoreAccessWithGapsRequiresMasking) 5034 return true; 5035 5036 // If masked interleaving is required, we expect that the user/target had 5037 // enabled it, because otherwise it either wouldn't have been created or 5038 // it should have been invalidated by the CostModel. 5039 assert(useMaskedInterleavedAccesses(TTI) && 5040 "Masked interleave-groups for predicated accesses are not enabled."); 5041 5042 if (Group->isReverse()) 5043 return false; 5044 5045 auto *Ty = getLoadStoreType(I); 5046 const Align Alignment = getLoadStoreAlignment(I); 5047 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5048 : TTI.isLegalMaskedStore(Ty, Alignment); 5049 } 5050 5051 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5052 Instruction *I, ElementCount VF) { 5053 // Get and ensure we have a valid memory instruction. 5054 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 5055 5056 auto *Ptr = getLoadStorePointerOperand(I); 5057 auto *ScalarTy = getLoadStoreType(I); 5058 5059 // In order to be widened, the pointer should be consecutive, first of all. 5060 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 5061 return false; 5062 5063 // If the instruction is a store located in a predicated block, it will be 5064 // scalarized. 5065 if (isScalarWithPredication(I)) 5066 return false; 5067 5068 // If the instruction's allocated size doesn't equal it's type size, it 5069 // requires padding and will be scalarized. 5070 auto &DL = I->getModule()->getDataLayout(); 5071 if (hasIrregularType(ScalarTy, DL)) 5072 return false; 5073 5074 return true; 5075 } 5076 5077 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5078 // We should not collect Uniforms more than once per VF. Right now, 5079 // this function is called from collectUniformsAndScalars(), which 5080 // already does this check. Collecting Uniforms for VF=1 does not make any 5081 // sense. 5082 5083 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5084 "This function should not be visited twice for the same VF"); 5085 5086 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5087 // not analyze again. Uniforms.count(VF) will return 1. 5088 Uniforms[VF].clear(); 5089 5090 // We now know that the loop is vectorizable! 5091 // Collect instructions inside the loop that will remain uniform after 5092 // vectorization. 5093 5094 // Global values, params and instructions outside of current loop are out of 5095 // scope. 5096 auto isOutOfScope = [&](Value *V) -> bool { 5097 Instruction *I = dyn_cast<Instruction>(V); 5098 return (!I || !TheLoop->contains(I)); 5099 }; 5100 5101 // Worklist containing uniform instructions demanding lane 0. 5102 SetVector<Instruction *> Worklist; 5103 BasicBlock *Latch = TheLoop->getLoopLatch(); 5104 5105 // Add uniform instructions demanding lane 0 to the worklist. Instructions 5106 // that are scalar with predication must not be considered uniform after 5107 // vectorization, because that would create an erroneous replicating region 5108 // where only a single instance out of VF should be formed. 5109 // TODO: optimize such seldom cases if found important, see PR40816. 5110 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5111 if (isOutOfScope(I)) { 5112 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5113 << *I << "\n"); 5114 return; 5115 } 5116 if (isScalarWithPredication(I)) { 5117 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5118 << *I << "\n"); 5119 return; 5120 } 5121 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5122 Worklist.insert(I); 5123 }; 5124 5125 // Start with the conditional branch. If the branch condition is an 5126 // instruction contained in the loop that is only used by the branch, it is 5127 // uniform. 5128 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5129 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5130 addToWorklistIfAllowed(Cmp); 5131 5132 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5133 InstWidening WideningDecision = getWideningDecision(I, VF); 5134 assert(WideningDecision != CM_Unknown && 5135 "Widening decision should be ready at this moment"); 5136 5137 // A uniform memory op is itself uniform. We exclude uniform stores 5138 // here as they demand the last lane, not the first one. 5139 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5140 assert(WideningDecision == CM_Scalarize); 5141 return true; 5142 } 5143 5144 return (WideningDecision == CM_Widen || 5145 WideningDecision == CM_Widen_Reverse || 5146 WideningDecision == CM_Interleave); 5147 }; 5148 5149 5150 // Returns true if Ptr is the pointer operand of a memory access instruction 5151 // I, and I is known to not require scalarization. 5152 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5153 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5154 }; 5155 5156 // Holds a list of values which are known to have at least one uniform use. 5157 // Note that there may be other uses which aren't uniform. A "uniform use" 5158 // here is something which only demands lane 0 of the unrolled iterations; 5159 // it does not imply that all lanes produce the same value (e.g. this is not 5160 // the usual meaning of uniform) 5161 SetVector<Value *> HasUniformUse; 5162 5163 // Scan the loop for instructions which are either a) known to have only 5164 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5165 for (auto *BB : TheLoop->blocks()) 5166 for (auto &I : *BB) { 5167 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5168 switch (II->getIntrinsicID()) { 5169 case Intrinsic::sideeffect: 5170 case Intrinsic::experimental_noalias_scope_decl: 5171 case Intrinsic::assume: 5172 case Intrinsic::lifetime_start: 5173 case Intrinsic::lifetime_end: 5174 if (TheLoop->hasLoopInvariantOperands(&I)) 5175 addToWorklistIfAllowed(&I); 5176 break; 5177 default: 5178 break; 5179 } 5180 } 5181 5182 // ExtractValue instructions must be uniform, because the operands are 5183 // known to be loop-invariant. 5184 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5185 assert(isOutOfScope(EVI->getAggregateOperand()) && 5186 "Expected aggregate value to be loop invariant"); 5187 addToWorklistIfAllowed(EVI); 5188 continue; 5189 } 5190 5191 // If there's no pointer operand, there's nothing to do. 5192 auto *Ptr = getLoadStorePointerOperand(&I); 5193 if (!Ptr) 5194 continue; 5195 5196 // A uniform memory op is itself uniform. We exclude uniform stores 5197 // here as they demand the last lane, not the first one. 5198 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5199 addToWorklistIfAllowed(&I); 5200 5201 if (isUniformDecision(&I, VF)) { 5202 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5203 HasUniformUse.insert(Ptr); 5204 } 5205 } 5206 5207 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5208 // demanding) users. Since loops are assumed to be in LCSSA form, this 5209 // disallows uses outside the loop as well. 5210 for (auto *V : HasUniformUse) { 5211 if (isOutOfScope(V)) 5212 continue; 5213 auto *I = cast<Instruction>(V); 5214 auto UsersAreMemAccesses = 5215 llvm::all_of(I->users(), [&](User *U) -> bool { 5216 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5217 }); 5218 if (UsersAreMemAccesses) 5219 addToWorklistIfAllowed(I); 5220 } 5221 5222 // Expand Worklist in topological order: whenever a new instruction 5223 // is added , its users should be already inside Worklist. It ensures 5224 // a uniform instruction will only be used by uniform instructions. 5225 unsigned idx = 0; 5226 while (idx != Worklist.size()) { 5227 Instruction *I = Worklist[idx++]; 5228 5229 for (auto OV : I->operand_values()) { 5230 // isOutOfScope operands cannot be uniform instructions. 5231 if (isOutOfScope(OV)) 5232 continue; 5233 // First order recurrence Phi's should typically be considered 5234 // non-uniform. 5235 auto *OP = dyn_cast<PHINode>(OV); 5236 if (OP && Legal->isFirstOrderRecurrence(OP)) 5237 continue; 5238 // If all the users of the operand are uniform, then add the 5239 // operand into the uniform worklist. 5240 auto *OI = cast<Instruction>(OV); 5241 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5242 auto *J = cast<Instruction>(U); 5243 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5244 })) 5245 addToWorklistIfAllowed(OI); 5246 } 5247 } 5248 5249 // For an instruction to be added into Worklist above, all its users inside 5250 // the loop should also be in Worklist. However, this condition cannot be 5251 // true for phi nodes that form a cyclic dependence. We must process phi 5252 // nodes separately. An induction variable will remain uniform if all users 5253 // of the induction variable and induction variable update remain uniform. 5254 // The code below handles both pointer and non-pointer induction variables. 5255 for (auto &Induction : Legal->getInductionVars()) { 5256 auto *Ind = Induction.first; 5257 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5258 5259 // Determine if all users of the induction variable are uniform after 5260 // vectorization. 5261 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5262 auto *I = cast<Instruction>(U); 5263 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5264 isVectorizedMemAccessUse(I, Ind); 5265 }); 5266 if (!UniformInd) 5267 continue; 5268 5269 // Determine if all users of the induction variable update instruction are 5270 // uniform after vectorization. 5271 auto UniformIndUpdate = 5272 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5273 auto *I = cast<Instruction>(U); 5274 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5275 isVectorizedMemAccessUse(I, IndUpdate); 5276 }); 5277 if (!UniformIndUpdate) 5278 continue; 5279 5280 // The induction variable and its update instruction will remain uniform. 5281 addToWorklistIfAllowed(Ind); 5282 addToWorklistIfAllowed(IndUpdate); 5283 } 5284 5285 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5286 } 5287 5288 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5289 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5290 5291 if (Legal->getRuntimePointerChecking()->Need) { 5292 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5293 "runtime pointer checks needed. Enable vectorization of this " 5294 "loop with '#pragma clang loop vectorize(enable)' when " 5295 "compiling with -Os/-Oz", 5296 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5297 return true; 5298 } 5299 5300 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5301 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5302 "runtime SCEV checks needed. Enable vectorization of this " 5303 "loop with '#pragma clang loop vectorize(enable)' when " 5304 "compiling with -Os/-Oz", 5305 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5306 return true; 5307 } 5308 5309 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5310 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5311 reportVectorizationFailure("Runtime stride check for small trip count", 5312 "runtime stride == 1 checks needed. Enable vectorization of " 5313 "this loop without such check by compiling with -Os/-Oz", 5314 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5315 return true; 5316 } 5317 5318 return false; 5319 } 5320 5321 ElementCount 5322 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5323 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5324 return ElementCount::getScalable(0); 5325 5326 if (Hints->isScalableVectorizationDisabled()) { 5327 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5328 "ScalableVectorizationDisabled", ORE, TheLoop); 5329 return ElementCount::getScalable(0); 5330 } 5331 5332 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5333 5334 auto MaxScalableVF = ElementCount::getScalable( 5335 std::numeric_limits<ElementCount::ScalarTy>::max()); 5336 5337 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5338 // FIXME: While for scalable vectors this is currently sufficient, this should 5339 // be replaced by a more detailed mechanism that filters out specific VFs, 5340 // instead of invalidating vectorization for a whole set of VFs based on the 5341 // MaxVF. 5342 5343 // Disable scalable vectorization if the loop contains unsupported reductions. 5344 if (!canVectorizeReductions(MaxScalableVF)) { 5345 reportVectorizationInfo( 5346 "Scalable vectorization not supported for the reduction " 5347 "operations found in this loop.", 5348 "ScalableVFUnfeasible", ORE, TheLoop); 5349 return ElementCount::getScalable(0); 5350 } 5351 5352 // Disable scalable vectorization if the loop contains any instructions 5353 // with element types not supported for scalable vectors. 5354 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5355 return !Ty->isVoidTy() && 5356 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5357 })) { 5358 reportVectorizationInfo("Scalable vectorization is not supported " 5359 "for all element types found in this loop.", 5360 "ScalableVFUnfeasible", ORE, TheLoop); 5361 return ElementCount::getScalable(0); 5362 } 5363 5364 if (Legal->isSafeForAnyVectorWidth()) 5365 return MaxScalableVF; 5366 5367 // Limit MaxScalableVF by the maximum safe dependence distance. 5368 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5369 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 5370 MaxVScale = 5371 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 5372 MaxScalableVF = ElementCount::getScalable( 5373 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5374 if (!MaxScalableVF) 5375 reportVectorizationInfo( 5376 "Max legal vector width too small, scalable vectorization " 5377 "unfeasible.", 5378 "ScalableVFUnfeasible", ORE, TheLoop); 5379 5380 return MaxScalableVF; 5381 } 5382 5383 FixedScalableVFPair 5384 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5385 ElementCount UserVF) { 5386 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5387 unsigned SmallestType, WidestType; 5388 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5389 5390 // Get the maximum safe dependence distance in bits computed by LAA. 5391 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5392 // the memory accesses that is most restrictive (involved in the smallest 5393 // dependence distance). 5394 unsigned MaxSafeElements = 5395 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5396 5397 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5398 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5399 5400 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5401 << ".\n"); 5402 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5403 << ".\n"); 5404 5405 // First analyze the UserVF, fall back if the UserVF should be ignored. 5406 if (UserVF) { 5407 auto MaxSafeUserVF = 5408 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5409 5410 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5411 // If `VF=vscale x N` is safe, then so is `VF=N` 5412 if (UserVF.isScalable()) 5413 return FixedScalableVFPair( 5414 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5415 else 5416 return UserVF; 5417 } 5418 5419 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5420 5421 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5422 // is better to ignore the hint and let the compiler choose a suitable VF. 5423 if (!UserVF.isScalable()) { 5424 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5425 << " is unsafe, clamping to max safe VF=" 5426 << MaxSafeFixedVF << ".\n"); 5427 ORE->emit([&]() { 5428 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5429 TheLoop->getStartLoc(), 5430 TheLoop->getHeader()) 5431 << "User-specified vectorization factor " 5432 << ore::NV("UserVectorizationFactor", UserVF) 5433 << " is unsafe, clamping to maximum safe vectorization factor " 5434 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5435 }); 5436 return MaxSafeFixedVF; 5437 } 5438 5439 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5440 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5441 << " is ignored because scalable vectors are not " 5442 "available.\n"); 5443 ORE->emit([&]() { 5444 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5445 TheLoop->getStartLoc(), 5446 TheLoop->getHeader()) 5447 << "User-specified vectorization factor " 5448 << ore::NV("UserVectorizationFactor", UserVF) 5449 << " is ignored because the target does not support scalable " 5450 "vectors. The compiler will pick a more suitable value."; 5451 }); 5452 } else { 5453 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5454 << " is unsafe. Ignoring scalable UserVF.\n"); 5455 ORE->emit([&]() { 5456 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5457 TheLoop->getStartLoc(), 5458 TheLoop->getHeader()) 5459 << "User-specified vectorization factor " 5460 << ore::NV("UserVectorizationFactor", UserVF) 5461 << " is unsafe. Ignoring the hint to let the compiler pick a " 5462 "more suitable value."; 5463 }); 5464 } 5465 } 5466 5467 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5468 << " / " << WidestType << " bits.\n"); 5469 5470 FixedScalableVFPair Result(ElementCount::getFixed(1), 5471 ElementCount::getScalable(0)); 5472 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5473 WidestType, MaxSafeFixedVF)) 5474 Result.FixedVF = MaxVF; 5475 5476 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5477 WidestType, MaxSafeScalableVF)) 5478 if (MaxVF.isScalable()) { 5479 Result.ScalableVF = MaxVF; 5480 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5481 << "\n"); 5482 } 5483 5484 return Result; 5485 } 5486 5487 FixedScalableVFPair 5488 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5489 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5490 // TODO: It may by useful to do since it's still likely to be dynamically 5491 // uniform if the target can skip. 5492 reportVectorizationFailure( 5493 "Not inserting runtime ptr check for divergent target", 5494 "runtime pointer checks needed. Not enabled for divergent target", 5495 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5496 return FixedScalableVFPair::getNone(); 5497 } 5498 5499 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5500 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5501 if (TC == 1) { 5502 reportVectorizationFailure("Single iteration (non) loop", 5503 "loop trip count is one, irrelevant for vectorization", 5504 "SingleIterationLoop", ORE, TheLoop); 5505 return FixedScalableVFPair::getNone(); 5506 } 5507 5508 switch (ScalarEpilogueStatus) { 5509 case CM_ScalarEpilogueAllowed: 5510 return computeFeasibleMaxVF(TC, UserVF); 5511 case CM_ScalarEpilogueNotAllowedUsePredicate: 5512 LLVM_FALLTHROUGH; 5513 case CM_ScalarEpilogueNotNeededUsePredicate: 5514 LLVM_DEBUG( 5515 dbgs() << "LV: vector predicate hint/switch found.\n" 5516 << "LV: Not allowing scalar epilogue, creating predicated " 5517 << "vector loop.\n"); 5518 break; 5519 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5520 // fallthrough as a special case of OptForSize 5521 case CM_ScalarEpilogueNotAllowedOptSize: 5522 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5523 LLVM_DEBUG( 5524 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5525 else 5526 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5527 << "count.\n"); 5528 5529 // Bail if runtime checks are required, which are not good when optimising 5530 // for size. 5531 if (runtimeChecksRequired()) 5532 return FixedScalableVFPair::getNone(); 5533 5534 break; 5535 } 5536 5537 // The only loops we can vectorize without a scalar epilogue, are loops with 5538 // a bottom-test and a single exiting block. We'd have to handle the fact 5539 // that not every instruction executes on the last iteration. This will 5540 // require a lane mask which varies through the vector loop body. (TODO) 5541 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5542 // If there was a tail-folding hint/switch, but we can't fold the tail by 5543 // masking, fallback to a vectorization with a scalar epilogue. 5544 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5545 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5546 "scalar epilogue instead.\n"); 5547 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5548 return computeFeasibleMaxVF(TC, UserVF); 5549 } 5550 return FixedScalableVFPair::getNone(); 5551 } 5552 5553 // Now try the tail folding 5554 5555 // Invalidate interleave groups that require an epilogue if we can't mask 5556 // the interleave-group. 5557 if (!useMaskedInterleavedAccesses(TTI)) { 5558 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5559 "No decisions should have been taken at this point"); 5560 // Note: There is no need to invalidate any cost modeling decisions here, as 5561 // non where taken so far. 5562 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5563 } 5564 5565 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF); 5566 // Avoid tail folding if the trip count is known to be a multiple of any VF 5567 // we chose. 5568 // FIXME: The condition below pessimises the case for fixed-width vectors, 5569 // when scalable VFs are also candidates for vectorization. 5570 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5571 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5572 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5573 "MaxFixedVF must be a power of 2"); 5574 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5575 : MaxFixedVF.getFixedValue(); 5576 ScalarEvolution *SE = PSE.getSE(); 5577 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5578 const SCEV *ExitCount = SE->getAddExpr( 5579 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5580 const SCEV *Rem = SE->getURemExpr( 5581 SE->applyLoopGuards(ExitCount, TheLoop), 5582 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5583 if (Rem->isZero()) { 5584 // Accept MaxFixedVF if we do not have a tail. 5585 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5586 return MaxFactors; 5587 } 5588 } 5589 5590 // For scalable vectors, don't use tail folding as this is currently not yet 5591 // supported. The code is likely to have ended up here if the tripcount is 5592 // low, in which case it makes sense not to use scalable vectors. 5593 if (MaxFactors.ScalableVF.isVector()) 5594 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5595 5596 // If we don't know the precise trip count, or if the trip count that we 5597 // found modulo the vectorization factor is not zero, try to fold the tail 5598 // by masking. 5599 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5600 if (Legal->prepareToFoldTailByMasking()) { 5601 FoldTailByMasking = true; 5602 return MaxFactors; 5603 } 5604 5605 // If there was a tail-folding hint/switch, but we can't fold the tail by 5606 // masking, fallback to a vectorization with a scalar epilogue. 5607 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5608 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5609 "scalar epilogue instead.\n"); 5610 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5611 return MaxFactors; 5612 } 5613 5614 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5615 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5616 return FixedScalableVFPair::getNone(); 5617 } 5618 5619 if (TC == 0) { 5620 reportVectorizationFailure( 5621 "Unable to calculate the loop count due to complex control flow", 5622 "unable to calculate the loop count due to complex control flow", 5623 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5624 return FixedScalableVFPair::getNone(); 5625 } 5626 5627 reportVectorizationFailure( 5628 "Cannot optimize for size and vectorize at the same time.", 5629 "cannot optimize for size and vectorize at the same time. " 5630 "Enable vectorization of this loop with '#pragma clang loop " 5631 "vectorize(enable)' when compiling with -Os/-Oz", 5632 "NoTailLoopWithOptForSize", ORE, TheLoop); 5633 return FixedScalableVFPair::getNone(); 5634 } 5635 5636 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5637 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5638 const ElementCount &MaxSafeVF) { 5639 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5640 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5641 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5642 : TargetTransformInfo::RGK_FixedWidthVector); 5643 5644 // Convenience function to return the minimum of two ElementCounts. 5645 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5646 assert((LHS.isScalable() == RHS.isScalable()) && 5647 "Scalable flags must match"); 5648 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5649 }; 5650 5651 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5652 // Note that both WidestRegister and WidestType may not be a powers of 2. 5653 auto MaxVectorElementCount = ElementCount::get( 5654 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5655 ComputeScalableMaxVF); 5656 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5657 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5658 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5659 5660 if (!MaxVectorElementCount) { 5661 LLVM_DEBUG(dbgs() << "LV: The target has no " 5662 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5663 << " vector registers.\n"); 5664 return ElementCount::getFixed(1); 5665 } 5666 5667 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5668 if (ConstTripCount && 5669 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5670 isPowerOf2_32(ConstTripCount)) { 5671 // We need to clamp the VF to be the ConstTripCount. There is no point in 5672 // choosing a higher viable VF as done in the loop below. If 5673 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5674 // the TC is less than or equal to the known number of lanes. 5675 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5676 << ConstTripCount << "\n"); 5677 return TripCountEC; 5678 } 5679 5680 ElementCount MaxVF = MaxVectorElementCount; 5681 if (TTI.shouldMaximizeVectorBandwidth() || 5682 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5683 auto MaxVectorElementCountMaxBW = ElementCount::get( 5684 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5685 ComputeScalableMaxVF); 5686 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5687 5688 // Collect all viable vectorization factors larger than the default MaxVF 5689 // (i.e. MaxVectorElementCount). 5690 SmallVector<ElementCount, 8> VFs; 5691 for (ElementCount VS = MaxVectorElementCount * 2; 5692 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5693 VFs.push_back(VS); 5694 5695 // For each VF calculate its register usage. 5696 auto RUs = calculateRegisterUsage(VFs); 5697 5698 // Select the largest VF which doesn't require more registers than existing 5699 // ones. 5700 for (int i = RUs.size() - 1; i >= 0; --i) { 5701 bool Selected = true; 5702 for (auto &pair : RUs[i].MaxLocalUsers) { 5703 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5704 if (pair.second > TargetNumRegisters) 5705 Selected = false; 5706 } 5707 if (Selected) { 5708 MaxVF = VFs[i]; 5709 break; 5710 } 5711 } 5712 if (ElementCount MinVF = 5713 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5714 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5715 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5716 << ") with target's minimum: " << MinVF << '\n'); 5717 MaxVF = MinVF; 5718 } 5719 } 5720 } 5721 return MaxVF; 5722 } 5723 5724 bool LoopVectorizationCostModel::isMoreProfitable( 5725 const VectorizationFactor &A, const VectorizationFactor &B) const { 5726 InstructionCost CostA = A.Cost; 5727 InstructionCost CostB = B.Cost; 5728 5729 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5730 5731 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5732 MaxTripCount) { 5733 // If we are folding the tail and the trip count is a known (possibly small) 5734 // constant, the trip count will be rounded up to an integer number of 5735 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5736 // which we compare directly. When not folding the tail, the total cost will 5737 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5738 // approximated with the per-lane cost below instead of using the tripcount 5739 // as here. 5740 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5741 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5742 return RTCostA < RTCostB; 5743 } 5744 5745 // Improve estimate for the vector width if it is scalable. 5746 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5747 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5748 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) { 5749 if (A.Width.isScalable()) 5750 EstimatedWidthA *= VScale.getValue(); 5751 if (B.Width.isScalable()) 5752 EstimatedWidthB *= VScale.getValue(); 5753 } 5754 5755 // When set to preferred, for now assume vscale may be larger than 1 (or the 5756 // one being tuned for), so that scalable vectorization is slightly favorable 5757 // over fixed-width vectorization. 5758 if (Hints->isScalableVectorizationPreferred()) 5759 if (A.Width.isScalable() && !B.Width.isScalable()) 5760 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5761 5762 // To avoid the need for FP division: 5763 // (CostA / A.Width) < (CostB / B.Width) 5764 // <=> (CostA * B.Width) < (CostB * A.Width) 5765 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5766 } 5767 5768 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5769 const ElementCountSet &VFCandidates) { 5770 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5771 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5772 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5773 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5774 "Expected Scalar VF to be a candidate"); 5775 5776 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5777 VectorizationFactor ChosenFactor = ScalarCost; 5778 5779 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5780 if (ForceVectorization && VFCandidates.size() > 1) { 5781 // Ignore scalar width, because the user explicitly wants vectorization. 5782 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5783 // evaluation. 5784 ChosenFactor.Cost = InstructionCost::getMax(); 5785 } 5786 5787 SmallVector<InstructionVFPair> InvalidCosts; 5788 for (const auto &i : VFCandidates) { 5789 // The cost for scalar VF=1 is already calculated, so ignore it. 5790 if (i.isScalar()) 5791 continue; 5792 5793 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5794 VectorizationFactor Candidate(i, C.first); 5795 5796 #ifndef NDEBUG 5797 unsigned AssumedMinimumVscale = 1; 5798 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) 5799 AssumedMinimumVscale = VScale.getValue(); 5800 unsigned Width = 5801 Candidate.Width.isScalable() 5802 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5803 : Candidate.Width.getFixedValue(); 5804 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5805 << " costs: " << (Candidate.Cost / Width)); 5806 if (i.isScalable()) 5807 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5808 << AssumedMinimumVscale << ")"); 5809 LLVM_DEBUG(dbgs() << ".\n"); 5810 #endif 5811 5812 if (!C.second && !ForceVectorization) { 5813 LLVM_DEBUG( 5814 dbgs() << "LV: Not considering vector loop of width " << i 5815 << " because it will not generate any vector instructions.\n"); 5816 continue; 5817 } 5818 5819 // If profitable add it to ProfitableVF list. 5820 if (isMoreProfitable(Candidate, ScalarCost)) 5821 ProfitableVFs.push_back(Candidate); 5822 5823 if (isMoreProfitable(Candidate, ChosenFactor)) 5824 ChosenFactor = Candidate; 5825 } 5826 5827 // Emit a report of VFs with invalid costs in the loop. 5828 if (!InvalidCosts.empty()) { 5829 // Group the remarks per instruction, keeping the instruction order from 5830 // InvalidCosts. 5831 std::map<Instruction *, unsigned> Numbering; 5832 unsigned I = 0; 5833 for (auto &Pair : InvalidCosts) 5834 if (!Numbering.count(Pair.first)) 5835 Numbering[Pair.first] = I++; 5836 5837 // Sort the list, first on instruction(number) then on VF. 5838 llvm::sort(InvalidCosts, 5839 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5840 if (Numbering[A.first] != Numbering[B.first]) 5841 return Numbering[A.first] < Numbering[B.first]; 5842 ElementCountComparator ECC; 5843 return ECC(A.second, B.second); 5844 }); 5845 5846 // For a list of ordered instruction-vf pairs: 5847 // [(load, vf1), (load, vf2), (store, vf1)] 5848 // Group the instructions together to emit separate remarks for: 5849 // load (vf1, vf2) 5850 // store (vf1) 5851 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5852 auto Subset = ArrayRef<InstructionVFPair>(); 5853 do { 5854 if (Subset.empty()) 5855 Subset = Tail.take_front(1); 5856 5857 Instruction *I = Subset.front().first; 5858 5859 // If the next instruction is different, or if there are no other pairs, 5860 // emit a remark for the collated subset. e.g. 5861 // [(load, vf1), (load, vf2))] 5862 // to emit: 5863 // remark: invalid costs for 'load' at VF=(vf, vf2) 5864 if (Subset == Tail || Tail[Subset.size()].first != I) { 5865 std::string OutString; 5866 raw_string_ostream OS(OutString); 5867 assert(!Subset.empty() && "Unexpected empty range"); 5868 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5869 for (auto &Pair : Subset) 5870 OS << (Pair.second == Subset.front().second ? "" : ", ") 5871 << Pair.second; 5872 OS << "):"; 5873 if (auto *CI = dyn_cast<CallInst>(I)) 5874 OS << " call to " << CI->getCalledFunction()->getName(); 5875 else 5876 OS << " " << I->getOpcodeName(); 5877 OS.flush(); 5878 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5879 Tail = Tail.drop_front(Subset.size()); 5880 Subset = {}; 5881 } else 5882 // Grow the subset by one element 5883 Subset = Tail.take_front(Subset.size() + 1); 5884 } while (!Tail.empty()); 5885 } 5886 5887 if (!EnableCondStoresVectorization && NumPredStores) { 5888 reportVectorizationFailure("There are conditional stores.", 5889 "store that is conditionally executed prevents vectorization", 5890 "ConditionalStore", ORE, TheLoop); 5891 ChosenFactor = ScalarCost; 5892 } 5893 5894 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5895 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5896 << "LV: Vectorization seems to be not beneficial, " 5897 << "but was forced by a user.\n"); 5898 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5899 return ChosenFactor; 5900 } 5901 5902 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5903 const Loop &L, ElementCount VF) const { 5904 // Cross iteration phis such as reductions need special handling and are 5905 // currently unsupported. 5906 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 5907 return Legal->isFirstOrderRecurrence(&Phi) || 5908 Legal->isReductionVariable(&Phi); 5909 })) 5910 return false; 5911 5912 // Phis with uses outside of the loop require special handling and are 5913 // currently unsupported. 5914 for (auto &Entry : Legal->getInductionVars()) { 5915 // Look for uses of the value of the induction at the last iteration. 5916 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5917 for (User *U : PostInc->users()) 5918 if (!L.contains(cast<Instruction>(U))) 5919 return false; 5920 // Look for uses of penultimate value of the induction. 5921 for (User *U : Entry.first->users()) 5922 if (!L.contains(cast<Instruction>(U))) 5923 return false; 5924 } 5925 5926 // Induction variables that are widened require special handling that is 5927 // currently not supported. 5928 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5929 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5930 this->isProfitableToScalarize(Entry.first, VF)); 5931 })) 5932 return false; 5933 5934 // Epilogue vectorization code has not been auditted to ensure it handles 5935 // non-latch exits properly. It may be fine, but it needs auditted and 5936 // tested. 5937 if (L.getExitingBlock() != L.getLoopLatch()) 5938 return false; 5939 5940 return true; 5941 } 5942 5943 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5944 const ElementCount VF) const { 5945 // FIXME: We need a much better cost-model to take different parameters such 5946 // as register pressure, code size increase and cost of extra branches into 5947 // account. For now we apply a very crude heuristic and only consider loops 5948 // with vectorization factors larger than a certain value. 5949 // We also consider epilogue vectorization unprofitable for targets that don't 5950 // consider interleaving beneficial (eg. MVE). 5951 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5952 return false; 5953 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 5954 return true; 5955 return false; 5956 } 5957 5958 VectorizationFactor 5959 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5960 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5961 VectorizationFactor Result = VectorizationFactor::Disabled(); 5962 if (!EnableEpilogueVectorization) { 5963 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5964 return Result; 5965 } 5966 5967 if (!isScalarEpilogueAllowed()) { 5968 LLVM_DEBUG( 5969 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5970 "allowed.\n";); 5971 return Result; 5972 } 5973 5974 // Not really a cost consideration, but check for unsupported cases here to 5975 // simplify the logic. 5976 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5977 LLVM_DEBUG( 5978 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5979 "not a supported candidate.\n";); 5980 return Result; 5981 } 5982 5983 if (EpilogueVectorizationForceVF > 1) { 5984 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5985 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5986 if (LVP.hasPlanWithVF(ForcedEC)) 5987 return {ForcedEC, 0}; 5988 else { 5989 LLVM_DEBUG( 5990 dbgs() 5991 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5992 return Result; 5993 } 5994 } 5995 5996 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5997 TheLoop->getHeader()->getParent()->hasMinSize()) { 5998 LLVM_DEBUG( 5999 dbgs() 6000 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6001 return Result; 6002 } 6003 6004 auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 6005 if (MainLoopVF.isScalable()) 6006 LLVM_DEBUG( 6007 dbgs() << "LEV: Epilogue vectorization using scalable vectors not " 6008 "yet supported. Converting to fixed-width (VF=" 6009 << FixedMainLoopVF << ") instead\n"); 6010 6011 if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) { 6012 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 6013 "this loop\n"); 6014 return Result; 6015 } 6016 6017 for (auto &NextVF : ProfitableVFs) 6018 if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) && 6019 (Result.Width.getFixedValue() == 1 || 6020 isMoreProfitable(NextVF, Result)) && 6021 LVP.hasPlanWithVF(NextVF.Width)) 6022 Result = NextVF; 6023 6024 if (Result != VectorizationFactor::Disabled()) 6025 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6026 << Result.Width.getFixedValue() << "\n";); 6027 return Result; 6028 } 6029 6030 std::pair<unsigned, unsigned> 6031 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6032 unsigned MinWidth = -1U; 6033 unsigned MaxWidth = 8; 6034 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6035 for (Type *T : ElementTypesInLoop) { 6036 MinWidth = std::min<unsigned>( 6037 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6038 MaxWidth = std::max<unsigned>( 6039 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6040 } 6041 return {MinWidth, MaxWidth}; 6042 } 6043 6044 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6045 ElementTypesInLoop.clear(); 6046 // For each block. 6047 for (BasicBlock *BB : TheLoop->blocks()) { 6048 // For each instruction in the loop. 6049 for (Instruction &I : BB->instructionsWithoutDebug()) { 6050 Type *T = I.getType(); 6051 6052 // Skip ignored values. 6053 if (ValuesToIgnore.count(&I)) 6054 continue; 6055 6056 // Only examine Loads, Stores and PHINodes. 6057 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6058 continue; 6059 6060 // Examine PHI nodes that are reduction variables. Update the type to 6061 // account for the recurrence type. 6062 if (auto *PN = dyn_cast<PHINode>(&I)) { 6063 if (!Legal->isReductionVariable(PN)) 6064 continue; 6065 const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; 6066 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6067 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6068 RdxDesc.getRecurrenceType(), 6069 TargetTransformInfo::ReductionFlags())) 6070 continue; 6071 T = RdxDesc.getRecurrenceType(); 6072 } 6073 6074 // Examine the stored values. 6075 if (auto *ST = dyn_cast<StoreInst>(&I)) 6076 T = ST->getValueOperand()->getType(); 6077 6078 // Ignore loaded pointer types and stored pointer types that are not 6079 // vectorizable. 6080 // 6081 // FIXME: The check here attempts to predict whether a load or store will 6082 // be vectorized. We only know this for certain after a VF has 6083 // been selected. Here, we assume that if an access can be 6084 // vectorized, it will be. We should also look at extending this 6085 // optimization to non-pointer types. 6086 // 6087 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6088 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6089 continue; 6090 6091 ElementTypesInLoop.insert(T); 6092 } 6093 } 6094 } 6095 6096 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6097 unsigned LoopCost) { 6098 // -- The interleave heuristics -- 6099 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6100 // There are many micro-architectural considerations that we can't predict 6101 // at this level. For example, frontend pressure (on decode or fetch) due to 6102 // code size, or the number and capabilities of the execution ports. 6103 // 6104 // We use the following heuristics to select the interleave count: 6105 // 1. If the code has reductions, then we interleave to break the cross 6106 // iteration dependency. 6107 // 2. If the loop is really small, then we interleave to reduce the loop 6108 // overhead. 6109 // 3. We don't interleave if we think that we will spill registers to memory 6110 // due to the increased register pressure. 6111 6112 if (!isScalarEpilogueAllowed()) 6113 return 1; 6114 6115 // We used the distance for the interleave count. 6116 if (Legal->getMaxSafeDepDistBytes() != -1U) 6117 return 1; 6118 6119 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6120 const bool HasReductions = !Legal->getReductionVars().empty(); 6121 // Do not interleave loops with a relatively small known or estimated trip 6122 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6123 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6124 // because with the above conditions interleaving can expose ILP and break 6125 // cross iteration dependences for reductions. 6126 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6127 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6128 return 1; 6129 6130 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6131 // We divide by these constants so assume that we have at least one 6132 // instruction that uses at least one register. 6133 for (auto& pair : R.MaxLocalUsers) { 6134 pair.second = std::max(pair.second, 1U); 6135 } 6136 6137 // We calculate the interleave count using the following formula. 6138 // Subtract the number of loop invariants from the number of available 6139 // registers. These registers are used by all of the interleaved instances. 6140 // Next, divide the remaining registers by the number of registers that is 6141 // required by the loop, in order to estimate how many parallel instances 6142 // fit without causing spills. All of this is rounded down if necessary to be 6143 // a power of two. We want power of two interleave count to simplify any 6144 // addressing operations or alignment considerations. 6145 // We also want power of two interleave counts to ensure that the induction 6146 // variable of the vector loop wraps to zero, when tail is folded by masking; 6147 // this currently happens when OptForSize, in which case IC is set to 1 above. 6148 unsigned IC = UINT_MAX; 6149 6150 for (auto& pair : R.MaxLocalUsers) { 6151 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6152 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6153 << " registers of " 6154 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6155 if (VF.isScalar()) { 6156 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6157 TargetNumRegisters = ForceTargetNumScalarRegs; 6158 } else { 6159 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6160 TargetNumRegisters = ForceTargetNumVectorRegs; 6161 } 6162 unsigned MaxLocalUsers = pair.second; 6163 unsigned LoopInvariantRegs = 0; 6164 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6165 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6166 6167 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6168 // Don't count the induction variable as interleaved. 6169 if (EnableIndVarRegisterHeur) { 6170 TmpIC = 6171 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6172 std::max(1U, (MaxLocalUsers - 1))); 6173 } 6174 6175 IC = std::min(IC, TmpIC); 6176 } 6177 6178 // Clamp the interleave ranges to reasonable counts. 6179 unsigned MaxInterleaveCount = 6180 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6181 6182 // Check if the user has overridden the max. 6183 if (VF.isScalar()) { 6184 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6185 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6186 } else { 6187 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6188 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6189 } 6190 6191 // If trip count is known or estimated compile time constant, limit the 6192 // interleave count to be less than the trip count divided by VF, provided it 6193 // is at least 1. 6194 // 6195 // For scalable vectors we can't know if interleaving is beneficial. It may 6196 // not be beneficial for small loops if none of the lanes in the second vector 6197 // iterations is enabled. However, for larger loops, there is likely to be a 6198 // similar benefit as for fixed-width vectors. For now, we choose to leave 6199 // the InterleaveCount as if vscale is '1', although if some information about 6200 // the vector is known (e.g. min vector size), we can make a better decision. 6201 if (BestKnownTC) { 6202 MaxInterleaveCount = 6203 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6204 // Make sure MaxInterleaveCount is greater than 0. 6205 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6206 } 6207 6208 assert(MaxInterleaveCount > 0 && 6209 "Maximum interleave count must be greater than 0"); 6210 6211 // Clamp the calculated IC to be between the 1 and the max interleave count 6212 // that the target and trip count allows. 6213 if (IC > MaxInterleaveCount) 6214 IC = MaxInterleaveCount; 6215 else 6216 // Make sure IC is greater than 0. 6217 IC = std::max(1u, IC); 6218 6219 assert(IC > 0 && "Interleave count must be greater than 0."); 6220 6221 // If we did not calculate the cost for VF (because the user selected the VF) 6222 // then we calculate the cost of VF here. 6223 if (LoopCost == 0) { 6224 InstructionCost C = expectedCost(VF).first; 6225 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6226 LoopCost = *C.getValue(); 6227 } 6228 6229 assert(LoopCost && "Non-zero loop cost expected"); 6230 6231 // Interleave if we vectorized this loop and there is a reduction that could 6232 // benefit from interleaving. 6233 if (VF.isVector() && HasReductions) { 6234 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6235 return IC; 6236 } 6237 6238 // Note that if we've already vectorized the loop we will have done the 6239 // runtime check and so interleaving won't require further checks. 6240 bool InterleavingRequiresRuntimePointerCheck = 6241 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6242 6243 // We want to interleave small loops in order to reduce the loop overhead and 6244 // potentially expose ILP opportunities. 6245 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6246 << "LV: IC is " << IC << '\n' 6247 << "LV: VF is " << VF << '\n'); 6248 const bool AggressivelyInterleaveReductions = 6249 TTI.enableAggressiveInterleaving(HasReductions); 6250 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6251 // We assume that the cost overhead is 1 and we use the cost model 6252 // to estimate the cost of the loop and interleave until the cost of the 6253 // loop overhead is about 5% of the cost of the loop. 6254 unsigned SmallIC = 6255 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6256 6257 // Interleave until store/load ports (estimated by max interleave count) are 6258 // saturated. 6259 unsigned NumStores = Legal->getNumStores(); 6260 unsigned NumLoads = Legal->getNumLoads(); 6261 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6262 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6263 6264 // There is little point in interleaving for reductions containing selects 6265 // and compares when VF=1 since it may just create more overhead than it's 6266 // worth for loops with small trip counts. This is because we still have to 6267 // do the final reduction after the loop. 6268 bool HasSelectCmpReductions = 6269 HasReductions && 6270 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6271 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6272 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 6273 RdxDesc.getRecurrenceKind()); 6274 }); 6275 if (HasSelectCmpReductions) { 6276 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 6277 return 1; 6278 } 6279 6280 // If we have a scalar reduction (vector reductions are already dealt with 6281 // by this point), we can increase the critical path length if the loop 6282 // we're interleaving is inside another loop. For tree-wise reductions 6283 // set the limit to 2, and for ordered reductions it's best to disable 6284 // interleaving entirely. 6285 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6286 bool HasOrderedReductions = 6287 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6288 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6289 return RdxDesc.isOrdered(); 6290 }); 6291 if (HasOrderedReductions) { 6292 LLVM_DEBUG( 6293 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6294 return 1; 6295 } 6296 6297 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6298 SmallIC = std::min(SmallIC, F); 6299 StoresIC = std::min(StoresIC, F); 6300 LoadsIC = std::min(LoadsIC, F); 6301 } 6302 6303 if (EnableLoadStoreRuntimeInterleave && 6304 std::max(StoresIC, LoadsIC) > SmallIC) { 6305 LLVM_DEBUG( 6306 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6307 return std::max(StoresIC, LoadsIC); 6308 } 6309 6310 // If there are scalar reductions and TTI has enabled aggressive 6311 // interleaving for reductions, we will interleave to expose ILP. 6312 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6313 AggressivelyInterleaveReductions) { 6314 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6315 // Interleave no less than SmallIC but not as aggressive as the normal IC 6316 // to satisfy the rare situation when resources are too limited. 6317 return std::max(IC / 2, SmallIC); 6318 } else { 6319 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6320 return SmallIC; 6321 } 6322 } 6323 6324 // Interleave if this is a large loop (small loops are already dealt with by 6325 // this point) that could benefit from interleaving. 6326 if (AggressivelyInterleaveReductions) { 6327 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6328 return IC; 6329 } 6330 6331 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6332 return 1; 6333 } 6334 6335 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6336 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6337 // This function calculates the register usage by measuring the highest number 6338 // of values that are alive at a single location. Obviously, this is a very 6339 // rough estimation. We scan the loop in a topological order in order and 6340 // assign a number to each instruction. We use RPO to ensure that defs are 6341 // met before their users. We assume that each instruction that has in-loop 6342 // users starts an interval. We record every time that an in-loop value is 6343 // used, so we have a list of the first and last occurrences of each 6344 // instruction. Next, we transpose this data structure into a multi map that 6345 // holds the list of intervals that *end* at a specific location. This multi 6346 // map allows us to perform a linear search. We scan the instructions linearly 6347 // and record each time that a new interval starts, by placing it in a set. 6348 // If we find this value in the multi-map then we remove it from the set. 6349 // The max register usage is the maximum size of the set. 6350 // We also search for instructions that are defined outside the loop, but are 6351 // used inside the loop. We need this number separately from the max-interval 6352 // usage number because when we unroll, loop-invariant values do not take 6353 // more register. 6354 LoopBlocksDFS DFS(TheLoop); 6355 DFS.perform(LI); 6356 6357 RegisterUsage RU; 6358 6359 // Each 'key' in the map opens a new interval. The values 6360 // of the map are the index of the 'last seen' usage of the 6361 // instruction that is the key. 6362 using IntervalMap = DenseMap<Instruction *, unsigned>; 6363 6364 // Maps instruction to its index. 6365 SmallVector<Instruction *, 64> IdxToInstr; 6366 // Marks the end of each interval. 6367 IntervalMap EndPoint; 6368 // Saves the list of instruction indices that are used in the loop. 6369 SmallPtrSet<Instruction *, 8> Ends; 6370 // Saves the list of values that are used in the loop but are 6371 // defined outside the loop, such as arguments and constants. 6372 SmallPtrSet<Value *, 8> LoopInvariants; 6373 6374 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6375 for (Instruction &I : BB->instructionsWithoutDebug()) { 6376 IdxToInstr.push_back(&I); 6377 6378 // Save the end location of each USE. 6379 for (Value *U : I.operands()) { 6380 auto *Instr = dyn_cast<Instruction>(U); 6381 6382 // Ignore non-instruction values such as arguments, constants, etc. 6383 if (!Instr) 6384 continue; 6385 6386 // If this instruction is outside the loop then record it and continue. 6387 if (!TheLoop->contains(Instr)) { 6388 LoopInvariants.insert(Instr); 6389 continue; 6390 } 6391 6392 // Overwrite previous end points. 6393 EndPoint[Instr] = IdxToInstr.size(); 6394 Ends.insert(Instr); 6395 } 6396 } 6397 } 6398 6399 // Saves the list of intervals that end with the index in 'key'. 6400 using InstrList = SmallVector<Instruction *, 2>; 6401 DenseMap<unsigned, InstrList> TransposeEnds; 6402 6403 // Transpose the EndPoints to a list of values that end at each index. 6404 for (auto &Interval : EndPoint) 6405 TransposeEnds[Interval.second].push_back(Interval.first); 6406 6407 SmallPtrSet<Instruction *, 8> OpenIntervals; 6408 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6409 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6410 6411 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6412 6413 // A lambda that gets the register usage for the given type and VF. 6414 const auto &TTICapture = TTI; 6415 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6416 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6417 return 0; 6418 InstructionCost::CostType RegUsage = 6419 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6420 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6421 "Nonsensical values for register usage."); 6422 return RegUsage; 6423 }; 6424 6425 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6426 Instruction *I = IdxToInstr[i]; 6427 6428 // Remove all of the instructions that end at this location. 6429 InstrList &List = TransposeEnds[i]; 6430 for (Instruction *ToRemove : List) 6431 OpenIntervals.erase(ToRemove); 6432 6433 // Ignore instructions that are never used within the loop. 6434 if (!Ends.count(I)) 6435 continue; 6436 6437 // Skip ignored values. 6438 if (ValuesToIgnore.count(I)) 6439 continue; 6440 6441 // For each VF find the maximum usage of registers. 6442 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6443 // Count the number of live intervals. 6444 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6445 6446 if (VFs[j].isScalar()) { 6447 for (auto Inst : OpenIntervals) { 6448 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6449 if (RegUsage.find(ClassID) == RegUsage.end()) 6450 RegUsage[ClassID] = 1; 6451 else 6452 RegUsage[ClassID] += 1; 6453 } 6454 } else { 6455 collectUniformsAndScalars(VFs[j]); 6456 for (auto Inst : OpenIntervals) { 6457 // Skip ignored values for VF > 1. 6458 if (VecValuesToIgnore.count(Inst)) 6459 continue; 6460 if (isScalarAfterVectorization(Inst, VFs[j])) { 6461 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6462 if (RegUsage.find(ClassID) == RegUsage.end()) 6463 RegUsage[ClassID] = 1; 6464 else 6465 RegUsage[ClassID] += 1; 6466 } else { 6467 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6468 if (RegUsage.find(ClassID) == RegUsage.end()) 6469 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6470 else 6471 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6472 } 6473 } 6474 } 6475 6476 for (auto& pair : RegUsage) { 6477 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6478 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6479 else 6480 MaxUsages[j][pair.first] = pair.second; 6481 } 6482 } 6483 6484 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6485 << OpenIntervals.size() << '\n'); 6486 6487 // Add the current instruction to the list of open intervals. 6488 OpenIntervals.insert(I); 6489 } 6490 6491 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6492 SmallMapVector<unsigned, unsigned, 4> Invariant; 6493 6494 for (auto Inst : LoopInvariants) { 6495 unsigned Usage = 6496 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6497 unsigned ClassID = 6498 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6499 if (Invariant.find(ClassID) == Invariant.end()) 6500 Invariant[ClassID] = Usage; 6501 else 6502 Invariant[ClassID] += Usage; 6503 } 6504 6505 LLVM_DEBUG({ 6506 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6507 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6508 << " item\n"; 6509 for (const auto &pair : MaxUsages[i]) { 6510 dbgs() << "LV(REG): RegisterClass: " 6511 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6512 << " registers\n"; 6513 } 6514 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6515 << " item\n"; 6516 for (const auto &pair : Invariant) { 6517 dbgs() << "LV(REG): RegisterClass: " 6518 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6519 << " registers\n"; 6520 } 6521 }); 6522 6523 RU.LoopInvariantRegs = Invariant; 6524 RU.MaxLocalUsers = MaxUsages[i]; 6525 RUs[i] = RU; 6526 } 6527 6528 return RUs; 6529 } 6530 6531 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6532 // TODO: Cost model for emulated masked load/store is completely 6533 // broken. This hack guides the cost model to use an artificially 6534 // high enough value to practically disable vectorization with such 6535 // operations, except where previously deployed legality hack allowed 6536 // using very low cost values. This is to avoid regressions coming simply 6537 // from moving "masked load/store" check from legality to cost model. 6538 // Masked Load/Gather emulation was previously never allowed. 6539 // Limited number of Masked Store/Scatter emulation was allowed. 6540 assert(isPredicatedInst(I) && 6541 "Expecting a scalar emulated instruction"); 6542 return isa<LoadInst>(I) || 6543 (isa<StoreInst>(I) && 6544 NumPredStores > NumberOfStoresToPredicate); 6545 } 6546 6547 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6548 // If we aren't vectorizing the loop, or if we've already collected the 6549 // instructions to scalarize, there's nothing to do. Collection may already 6550 // have occurred if we have a user-selected VF and are now computing the 6551 // expected cost for interleaving. 6552 if (VF.isScalar() || VF.isZero() || 6553 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6554 return; 6555 6556 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6557 // not profitable to scalarize any instructions, the presence of VF in the 6558 // map will indicate that we've analyzed it already. 6559 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6560 6561 // Find all the instructions that are scalar with predication in the loop and 6562 // determine if it would be better to not if-convert the blocks they are in. 6563 // If so, we also record the instructions to scalarize. 6564 for (BasicBlock *BB : TheLoop->blocks()) { 6565 if (!blockNeedsPredicationForAnyReason(BB)) 6566 continue; 6567 for (Instruction &I : *BB) 6568 if (isScalarWithPredication(&I)) { 6569 ScalarCostsTy ScalarCosts; 6570 // Do not apply discount if scalable, because that would lead to 6571 // invalid scalarization costs. 6572 // Do not apply discount logic if hacked cost is needed 6573 // for emulated masked memrefs. 6574 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && 6575 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6576 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6577 // Remember that BB will remain after vectorization. 6578 PredicatedBBsAfterVectorization.insert(BB); 6579 } 6580 } 6581 } 6582 6583 int LoopVectorizationCostModel::computePredInstDiscount( 6584 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6585 assert(!isUniformAfterVectorization(PredInst, VF) && 6586 "Instruction marked uniform-after-vectorization will be predicated"); 6587 6588 // Initialize the discount to zero, meaning that the scalar version and the 6589 // vector version cost the same. 6590 InstructionCost Discount = 0; 6591 6592 // Holds instructions to analyze. The instructions we visit are mapped in 6593 // ScalarCosts. Those instructions are the ones that would be scalarized if 6594 // we find that the scalar version costs less. 6595 SmallVector<Instruction *, 8> Worklist; 6596 6597 // Returns true if the given instruction can be scalarized. 6598 auto canBeScalarized = [&](Instruction *I) -> bool { 6599 // We only attempt to scalarize instructions forming a single-use chain 6600 // from the original predicated block that would otherwise be vectorized. 6601 // Although not strictly necessary, we give up on instructions we know will 6602 // already be scalar to avoid traversing chains that are unlikely to be 6603 // beneficial. 6604 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6605 isScalarAfterVectorization(I, VF)) 6606 return false; 6607 6608 // If the instruction is scalar with predication, it will be analyzed 6609 // separately. We ignore it within the context of PredInst. 6610 if (isScalarWithPredication(I)) 6611 return false; 6612 6613 // If any of the instruction's operands are uniform after vectorization, 6614 // the instruction cannot be scalarized. This prevents, for example, a 6615 // masked load from being scalarized. 6616 // 6617 // We assume we will only emit a value for lane zero of an instruction 6618 // marked uniform after vectorization, rather than VF identical values. 6619 // Thus, if we scalarize an instruction that uses a uniform, we would 6620 // create uses of values corresponding to the lanes we aren't emitting code 6621 // for. This behavior can be changed by allowing getScalarValue to clone 6622 // the lane zero values for uniforms rather than asserting. 6623 for (Use &U : I->operands()) 6624 if (auto *J = dyn_cast<Instruction>(U.get())) 6625 if (isUniformAfterVectorization(J, VF)) 6626 return false; 6627 6628 // Otherwise, we can scalarize the instruction. 6629 return true; 6630 }; 6631 6632 // Compute the expected cost discount from scalarizing the entire expression 6633 // feeding the predicated instruction. We currently only consider expressions 6634 // that are single-use instruction chains. 6635 Worklist.push_back(PredInst); 6636 while (!Worklist.empty()) { 6637 Instruction *I = Worklist.pop_back_val(); 6638 6639 // If we've already analyzed the instruction, there's nothing to do. 6640 if (ScalarCosts.find(I) != ScalarCosts.end()) 6641 continue; 6642 6643 // Compute the cost of the vector instruction. Note that this cost already 6644 // includes the scalarization overhead of the predicated instruction. 6645 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6646 6647 // Compute the cost of the scalarized instruction. This cost is the cost of 6648 // the instruction as if it wasn't if-converted and instead remained in the 6649 // predicated block. We will scale this cost by block probability after 6650 // computing the scalarization overhead. 6651 InstructionCost ScalarCost = 6652 VF.getFixedValue() * 6653 getInstructionCost(I, ElementCount::getFixed(1)).first; 6654 6655 // Compute the scalarization overhead of needed insertelement instructions 6656 // and phi nodes. 6657 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6658 ScalarCost += TTI.getScalarizationOverhead( 6659 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6660 APInt::getAllOnes(VF.getFixedValue()), true, false); 6661 ScalarCost += 6662 VF.getFixedValue() * 6663 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6664 } 6665 6666 // Compute the scalarization overhead of needed extractelement 6667 // instructions. For each of the instruction's operands, if the operand can 6668 // be scalarized, add it to the worklist; otherwise, account for the 6669 // overhead. 6670 for (Use &U : I->operands()) 6671 if (auto *J = dyn_cast<Instruction>(U.get())) { 6672 assert(VectorType::isValidElementType(J->getType()) && 6673 "Instruction has non-scalar type"); 6674 if (canBeScalarized(J)) 6675 Worklist.push_back(J); 6676 else if (needsExtract(J, VF)) { 6677 ScalarCost += TTI.getScalarizationOverhead( 6678 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6679 APInt::getAllOnes(VF.getFixedValue()), false, true); 6680 } 6681 } 6682 6683 // Scale the total scalar cost by block probability. 6684 ScalarCost /= getReciprocalPredBlockProb(); 6685 6686 // Compute the discount. A non-negative discount means the vector version 6687 // of the instruction costs more, and scalarizing would be beneficial. 6688 Discount += VectorCost - ScalarCost; 6689 ScalarCosts[I] = ScalarCost; 6690 } 6691 6692 return *Discount.getValue(); 6693 } 6694 6695 LoopVectorizationCostModel::VectorizationCostTy 6696 LoopVectorizationCostModel::expectedCost( 6697 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6698 VectorizationCostTy Cost; 6699 6700 // For each block. 6701 for (BasicBlock *BB : TheLoop->blocks()) { 6702 VectorizationCostTy BlockCost; 6703 6704 // For each instruction in the old loop. 6705 for (Instruction &I : BB->instructionsWithoutDebug()) { 6706 // Skip ignored values. 6707 if (ValuesToIgnore.count(&I) || 6708 (VF.isVector() && VecValuesToIgnore.count(&I))) 6709 continue; 6710 6711 VectorizationCostTy C = getInstructionCost(&I, VF); 6712 6713 // Check if we should override the cost. 6714 if (C.first.isValid() && 6715 ForceTargetInstructionCost.getNumOccurrences() > 0) 6716 C.first = InstructionCost(ForceTargetInstructionCost); 6717 6718 // Keep a list of instructions with invalid costs. 6719 if (Invalid && !C.first.isValid()) 6720 Invalid->emplace_back(&I, VF); 6721 6722 BlockCost.first += C.first; 6723 BlockCost.second |= C.second; 6724 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6725 << " for VF " << VF << " For instruction: " << I 6726 << '\n'); 6727 } 6728 6729 // If we are vectorizing a predicated block, it will have been 6730 // if-converted. This means that the block's instructions (aside from 6731 // stores and instructions that may divide by zero) will now be 6732 // unconditionally executed. For the scalar case, we may not always execute 6733 // the predicated block, if it is an if-else block. Thus, scale the block's 6734 // cost by the probability of executing it. blockNeedsPredication from 6735 // Legal is used so as to not include all blocks in tail folded loops. 6736 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6737 BlockCost.first /= getReciprocalPredBlockProb(); 6738 6739 Cost.first += BlockCost.first; 6740 Cost.second |= BlockCost.second; 6741 } 6742 6743 return Cost; 6744 } 6745 6746 /// Gets Address Access SCEV after verifying that the access pattern 6747 /// is loop invariant except the induction variable dependence. 6748 /// 6749 /// This SCEV can be sent to the Target in order to estimate the address 6750 /// calculation cost. 6751 static const SCEV *getAddressAccessSCEV( 6752 Value *Ptr, 6753 LoopVectorizationLegality *Legal, 6754 PredicatedScalarEvolution &PSE, 6755 const Loop *TheLoop) { 6756 6757 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6758 if (!Gep) 6759 return nullptr; 6760 6761 // We are looking for a gep with all loop invariant indices except for one 6762 // which should be an induction variable. 6763 auto SE = PSE.getSE(); 6764 unsigned NumOperands = Gep->getNumOperands(); 6765 for (unsigned i = 1; i < NumOperands; ++i) { 6766 Value *Opd = Gep->getOperand(i); 6767 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6768 !Legal->isInductionVariable(Opd)) 6769 return nullptr; 6770 } 6771 6772 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6773 return PSE.getSCEV(Ptr); 6774 } 6775 6776 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6777 return Legal->hasStride(I->getOperand(0)) || 6778 Legal->hasStride(I->getOperand(1)); 6779 } 6780 6781 InstructionCost 6782 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6783 ElementCount VF) { 6784 assert(VF.isVector() && 6785 "Scalarization cost of instruction implies vectorization."); 6786 if (VF.isScalable()) 6787 return InstructionCost::getInvalid(); 6788 6789 Type *ValTy = getLoadStoreType(I); 6790 auto SE = PSE.getSE(); 6791 6792 unsigned AS = getLoadStoreAddressSpace(I); 6793 Value *Ptr = getLoadStorePointerOperand(I); 6794 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6795 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6796 // that it is being called from this specific place. 6797 6798 // Figure out whether the access is strided and get the stride value 6799 // if it's known in compile time 6800 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6801 6802 // Get the cost of the scalar memory instruction and address computation. 6803 InstructionCost Cost = 6804 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6805 6806 // Don't pass *I here, since it is scalar but will actually be part of a 6807 // vectorized loop where the user of it is a vectorized instruction. 6808 const Align Alignment = getLoadStoreAlignment(I); 6809 Cost += VF.getKnownMinValue() * 6810 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6811 AS, TTI::TCK_RecipThroughput); 6812 6813 // Get the overhead of the extractelement and insertelement instructions 6814 // we might create due to scalarization. 6815 Cost += getScalarizationOverhead(I, VF); 6816 6817 // If we have a predicated load/store, it will need extra i1 extracts and 6818 // conditional branches, but may not be executed for each vector lane. Scale 6819 // the cost by the probability of executing the predicated block. 6820 if (isPredicatedInst(I)) { 6821 Cost /= getReciprocalPredBlockProb(); 6822 6823 // Add the cost of an i1 extract and a branch 6824 auto *Vec_i1Ty = 6825 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6826 Cost += TTI.getScalarizationOverhead( 6827 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6828 /*Insert=*/false, /*Extract=*/true); 6829 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6830 6831 if (useEmulatedMaskMemRefHack(I)) 6832 // Artificially setting to a high enough value to practically disable 6833 // vectorization with such operations. 6834 Cost = 3000000; 6835 } 6836 6837 return Cost; 6838 } 6839 6840 InstructionCost 6841 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6842 ElementCount VF) { 6843 Type *ValTy = getLoadStoreType(I); 6844 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6845 Value *Ptr = getLoadStorePointerOperand(I); 6846 unsigned AS = getLoadStoreAddressSpace(I); 6847 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6848 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6849 6850 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6851 "Stride should be 1 or -1 for consecutive memory access"); 6852 const Align Alignment = getLoadStoreAlignment(I); 6853 InstructionCost Cost = 0; 6854 if (Legal->isMaskRequired(I)) 6855 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6856 CostKind); 6857 else 6858 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6859 CostKind, I); 6860 6861 bool Reverse = ConsecutiveStride < 0; 6862 if (Reverse) 6863 Cost += 6864 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6865 return Cost; 6866 } 6867 6868 InstructionCost 6869 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6870 ElementCount VF) { 6871 assert(Legal->isUniformMemOp(*I)); 6872 6873 Type *ValTy = getLoadStoreType(I); 6874 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6875 const Align Alignment = getLoadStoreAlignment(I); 6876 unsigned AS = getLoadStoreAddressSpace(I); 6877 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6878 if (isa<LoadInst>(I)) { 6879 return TTI.getAddressComputationCost(ValTy) + 6880 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6881 CostKind) + 6882 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6883 } 6884 StoreInst *SI = cast<StoreInst>(I); 6885 6886 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6887 return TTI.getAddressComputationCost(ValTy) + 6888 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6889 CostKind) + 6890 (isLoopInvariantStoreValue 6891 ? 0 6892 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6893 VF.getKnownMinValue() - 1)); 6894 } 6895 6896 InstructionCost 6897 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6898 ElementCount VF) { 6899 Type *ValTy = getLoadStoreType(I); 6900 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6901 const Align Alignment = getLoadStoreAlignment(I); 6902 const Value *Ptr = getLoadStorePointerOperand(I); 6903 6904 return TTI.getAddressComputationCost(VectorTy) + 6905 TTI.getGatherScatterOpCost( 6906 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6907 TargetTransformInfo::TCK_RecipThroughput, I); 6908 } 6909 6910 InstructionCost 6911 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6912 ElementCount VF) { 6913 // TODO: Once we have support for interleaving with scalable vectors 6914 // we can calculate the cost properly here. 6915 if (VF.isScalable()) 6916 return InstructionCost::getInvalid(); 6917 6918 Type *ValTy = getLoadStoreType(I); 6919 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6920 unsigned AS = getLoadStoreAddressSpace(I); 6921 6922 auto Group = getInterleavedAccessGroup(I); 6923 assert(Group && "Fail to get an interleaved access group."); 6924 6925 unsigned InterleaveFactor = Group->getFactor(); 6926 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6927 6928 // Holds the indices of existing members in the interleaved group. 6929 SmallVector<unsigned, 4> Indices; 6930 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6931 if (Group->getMember(IF)) 6932 Indices.push_back(IF); 6933 6934 // Calculate the cost of the whole interleaved group. 6935 bool UseMaskForGaps = 6936 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6937 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6938 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6939 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6940 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6941 6942 if (Group->isReverse()) { 6943 // TODO: Add support for reversed masked interleaved access. 6944 assert(!Legal->isMaskRequired(I) && 6945 "Reverse masked interleaved access not supported."); 6946 Cost += 6947 Group->getNumMembers() * 6948 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6949 } 6950 return Cost; 6951 } 6952 6953 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6954 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6955 using namespace llvm::PatternMatch; 6956 // Early exit for no inloop reductions 6957 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6958 return None; 6959 auto *VectorTy = cast<VectorType>(Ty); 6960 6961 // We are looking for a pattern of, and finding the minimal acceptable cost: 6962 // reduce(mul(ext(A), ext(B))) or 6963 // reduce(mul(A, B)) or 6964 // reduce(ext(A)) or 6965 // reduce(A). 6966 // The basic idea is that we walk down the tree to do that, finding the root 6967 // reduction instruction in InLoopReductionImmediateChains. From there we find 6968 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6969 // of the components. If the reduction cost is lower then we return it for the 6970 // reduction instruction and 0 for the other instructions in the pattern. If 6971 // it is not we return an invalid cost specifying the orignal cost method 6972 // should be used. 6973 Instruction *RetI = I; 6974 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6975 if (!RetI->hasOneUser()) 6976 return None; 6977 RetI = RetI->user_back(); 6978 } 6979 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6980 RetI->user_back()->getOpcode() == Instruction::Add) { 6981 if (!RetI->hasOneUser()) 6982 return None; 6983 RetI = RetI->user_back(); 6984 } 6985 6986 // Test if the found instruction is a reduction, and if not return an invalid 6987 // cost specifying the parent to use the original cost modelling. 6988 if (!InLoopReductionImmediateChains.count(RetI)) 6989 return None; 6990 6991 // Find the reduction this chain is a part of and calculate the basic cost of 6992 // the reduction on its own. 6993 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6994 Instruction *ReductionPhi = LastChain; 6995 while (!isa<PHINode>(ReductionPhi)) 6996 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6997 6998 const RecurrenceDescriptor &RdxDesc = 6999 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7000 7001 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7002 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 7003 7004 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 7005 // normal fmul instruction to the cost of the fadd reduction. 7006 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 7007 BaseCost += 7008 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 7009 7010 // If we're using ordered reductions then we can just return the base cost 7011 // here, since getArithmeticReductionCost calculates the full ordered 7012 // reduction cost when FP reassociation is not allowed. 7013 if (useOrderedReductions(RdxDesc)) 7014 return BaseCost; 7015 7016 // Get the operand that was not the reduction chain and match it to one of the 7017 // patterns, returning the better cost if it is found. 7018 Instruction *RedOp = RetI->getOperand(1) == LastChain 7019 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7020 : dyn_cast<Instruction>(RetI->getOperand(1)); 7021 7022 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7023 7024 Instruction *Op0, *Op1; 7025 if (RedOp && 7026 match(RedOp, 7027 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 7028 match(Op0, m_ZExtOrSExt(m_Value())) && 7029 Op0->getOpcode() == Op1->getOpcode() && 7030 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7031 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 7032 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 7033 7034 // Matched reduce(ext(mul(ext(A), ext(B))) 7035 // Note that the extend opcodes need to all match, or if A==B they will have 7036 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 7037 // which is equally fine. 7038 bool IsUnsigned = isa<ZExtInst>(Op0); 7039 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7040 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 7041 7042 InstructionCost ExtCost = 7043 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 7044 TTI::CastContextHint::None, CostKind, Op0); 7045 InstructionCost MulCost = 7046 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 7047 InstructionCost Ext2Cost = 7048 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 7049 TTI::CastContextHint::None, CostKind, RedOp); 7050 7051 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7052 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7053 CostKind); 7054 7055 if (RedCost.isValid() && 7056 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 7057 return I == RetI ? RedCost : 0; 7058 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7059 !TheLoop->isLoopInvariant(RedOp)) { 7060 // Matched reduce(ext(A)) 7061 bool IsUnsigned = isa<ZExtInst>(RedOp); 7062 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7063 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7064 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7065 CostKind); 7066 7067 InstructionCost ExtCost = 7068 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7069 TTI::CastContextHint::None, CostKind, RedOp); 7070 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7071 return I == RetI ? RedCost : 0; 7072 } else if (RedOp && 7073 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7074 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7075 Op0->getOpcode() == Op1->getOpcode() && 7076 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7077 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7078 bool IsUnsigned = isa<ZExtInst>(Op0); 7079 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7080 // Matched reduce(mul(ext, ext)) 7081 InstructionCost ExtCost = 7082 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7083 TTI::CastContextHint::None, CostKind, Op0); 7084 InstructionCost MulCost = 7085 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7086 7087 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7088 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7089 CostKind); 7090 7091 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7092 return I == RetI ? RedCost : 0; 7093 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 7094 // Matched reduce(mul()) 7095 InstructionCost MulCost = 7096 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7097 7098 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7099 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7100 CostKind); 7101 7102 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7103 return I == RetI ? RedCost : 0; 7104 } 7105 } 7106 7107 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7108 } 7109 7110 InstructionCost 7111 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7112 ElementCount VF) { 7113 // Calculate scalar cost only. Vectorization cost should be ready at this 7114 // moment. 7115 if (VF.isScalar()) { 7116 Type *ValTy = getLoadStoreType(I); 7117 const Align Alignment = getLoadStoreAlignment(I); 7118 unsigned AS = getLoadStoreAddressSpace(I); 7119 7120 return TTI.getAddressComputationCost(ValTy) + 7121 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7122 TTI::TCK_RecipThroughput, I); 7123 } 7124 return getWideningCost(I, VF); 7125 } 7126 7127 LoopVectorizationCostModel::VectorizationCostTy 7128 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7129 ElementCount VF) { 7130 // If we know that this instruction will remain uniform, check the cost of 7131 // the scalar version. 7132 if (isUniformAfterVectorization(I, VF)) 7133 VF = ElementCount::getFixed(1); 7134 7135 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7136 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7137 7138 // Forced scalars do not have any scalarization overhead. 7139 auto ForcedScalar = ForcedScalars.find(VF); 7140 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7141 auto InstSet = ForcedScalar->second; 7142 if (InstSet.count(I)) 7143 return VectorizationCostTy( 7144 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7145 VF.getKnownMinValue()), 7146 false); 7147 } 7148 7149 Type *VectorTy; 7150 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7151 7152 bool TypeNotScalarized = false; 7153 if (VF.isVector() && VectorTy->isVectorTy()) { 7154 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 7155 if (NumParts) 7156 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 7157 else 7158 C = InstructionCost::getInvalid(); 7159 } 7160 return VectorizationCostTy(C, TypeNotScalarized); 7161 } 7162 7163 InstructionCost 7164 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7165 ElementCount VF) const { 7166 7167 // There is no mechanism yet to create a scalable scalarization loop, 7168 // so this is currently Invalid. 7169 if (VF.isScalable()) 7170 return InstructionCost::getInvalid(); 7171 7172 if (VF.isScalar()) 7173 return 0; 7174 7175 InstructionCost Cost = 0; 7176 Type *RetTy = ToVectorTy(I->getType(), VF); 7177 if (!RetTy->isVoidTy() && 7178 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7179 Cost += TTI.getScalarizationOverhead( 7180 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7181 false); 7182 7183 // Some targets keep addresses scalar. 7184 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7185 return Cost; 7186 7187 // Some targets support efficient element stores. 7188 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7189 return Cost; 7190 7191 // Collect operands to consider. 7192 CallInst *CI = dyn_cast<CallInst>(I); 7193 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 7194 7195 // Skip operands that do not require extraction/scalarization and do not incur 7196 // any overhead. 7197 SmallVector<Type *> Tys; 7198 for (auto *V : filterExtractingOperands(Ops, VF)) 7199 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7200 return Cost + TTI.getOperandsScalarizationOverhead( 7201 filterExtractingOperands(Ops, VF), Tys); 7202 } 7203 7204 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7205 if (VF.isScalar()) 7206 return; 7207 NumPredStores = 0; 7208 for (BasicBlock *BB : TheLoop->blocks()) { 7209 // For each instruction in the old loop. 7210 for (Instruction &I : *BB) { 7211 Value *Ptr = getLoadStorePointerOperand(&I); 7212 if (!Ptr) 7213 continue; 7214 7215 // TODO: We should generate better code and update the cost model for 7216 // predicated uniform stores. Today they are treated as any other 7217 // predicated store (see added test cases in 7218 // invariant-store-vectorization.ll). 7219 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7220 NumPredStores++; 7221 7222 if (Legal->isUniformMemOp(I)) { 7223 // TODO: Avoid replicating loads and stores instead of 7224 // relying on instcombine to remove them. 7225 // Load: Scalar load + broadcast 7226 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7227 InstructionCost Cost; 7228 if (isa<StoreInst>(&I) && VF.isScalable() && 7229 isLegalGatherOrScatter(&I)) { 7230 Cost = getGatherScatterCost(&I, VF); 7231 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7232 } else { 7233 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7234 "Cannot yet scalarize uniform stores"); 7235 Cost = getUniformMemOpCost(&I, VF); 7236 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7237 } 7238 continue; 7239 } 7240 7241 // We assume that widening is the best solution when possible. 7242 if (memoryInstructionCanBeWidened(&I, VF)) { 7243 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7244 int ConsecutiveStride = Legal->isConsecutivePtr( 7245 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 7246 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7247 "Expected consecutive stride."); 7248 InstWidening Decision = 7249 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7250 setWideningDecision(&I, VF, Decision, Cost); 7251 continue; 7252 } 7253 7254 // Choose between Interleaving, Gather/Scatter or Scalarization. 7255 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7256 unsigned NumAccesses = 1; 7257 if (isAccessInterleaved(&I)) { 7258 auto Group = getInterleavedAccessGroup(&I); 7259 assert(Group && "Fail to get an interleaved access group."); 7260 7261 // Make one decision for the whole group. 7262 if (getWideningDecision(&I, VF) != CM_Unknown) 7263 continue; 7264 7265 NumAccesses = Group->getNumMembers(); 7266 if (interleavedAccessCanBeWidened(&I, VF)) 7267 InterleaveCost = getInterleaveGroupCost(&I, VF); 7268 } 7269 7270 InstructionCost GatherScatterCost = 7271 isLegalGatherOrScatter(&I) 7272 ? getGatherScatterCost(&I, VF) * NumAccesses 7273 : InstructionCost::getInvalid(); 7274 7275 InstructionCost ScalarizationCost = 7276 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7277 7278 // Choose better solution for the current VF, 7279 // write down this decision and use it during vectorization. 7280 InstructionCost Cost; 7281 InstWidening Decision; 7282 if (InterleaveCost <= GatherScatterCost && 7283 InterleaveCost < ScalarizationCost) { 7284 Decision = CM_Interleave; 7285 Cost = InterleaveCost; 7286 } else if (GatherScatterCost < ScalarizationCost) { 7287 Decision = CM_GatherScatter; 7288 Cost = GatherScatterCost; 7289 } else { 7290 Decision = CM_Scalarize; 7291 Cost = ScalarizationCost; 7292 } 7293 // If the instructions belongs to an interleave group, the whole group 7294 // receives the same decision. The whole group receives the cost, but 7295 // the cost will actually be assigned to one instruction. 7296 if (auto Group = getInterleavedAccessGroup(&I)) 7297 setWideningDecision(Group, VF, Decision, Cost); 7298 else 7299 setWideningDecision(&I, VF, Decision, Cost); 7300 } 7301 } 7302 7303 // Make sure that any load of address and any other address computation 7304 // remains scalar unless there is gather/scatter support. This avoids 7305 // inevitable extracts into address registers, and also has the benefit of 7306 // activating LSR more, since that pass can't optimize vectorized 7307 // addresses. 7308 if (TTI.prefersVectorizedAddressing()) 7309 return; 7310 7311 // Start with all scalar pointer uses. 7312 SmallPtrSet<Instruction *, 8> AddrDefs; 7313 for (BasicBlock *BB : TheLoop->blocks()) 7314 for (Instruction &I : *BB) { 7315 Instruction *PtrDef = 7316 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7317 if (PtrDef && TheLoop->contains(PtrDef) && 7318 getWideningDecision(&I, VF) != CM_GatherScatter) 7319 AddrDefs.insert(PtrDef); 7320 } 7321 7322 // Add all instructions used to generate the addresses. 7323 SmallVector<Instruction *, 4> Worklist; 7324 append_range(Worklist, AddrDefs); 7325 while (!Worklist.empty()) { 7326 Instruction *I = Worklist.pop_back_val(); 7327 for (auto &Op : I->operands()) 7328 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7329 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7330 AddrDefs.insert(InstOp).second) 7331 Worklist.push_back(InstOp); 7332 } 7333 7334 for (auto *I : AddrDefs) { 7335 if (isa<LoadInst>(I)) { 7336 // Setting the desired widening decision should ideally be handled in 7337 // by cost functions, but since this involves the task of finding out 7338 // if the loaded register is involved in an address computation, it is 7339 // instead changed here when we know this is the case. 7340 InstWidening Decision = getWideningDecision(I, VF); 7341 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7342 // Scalarize a widened load of address. 7343 setWideningDecision( 7344 I, VF, CM_Scalarize, 7345 (VF.getKnownMinValue() * 7346 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7347 else if (auto Group = getInterleavedAccessGroup(I)) { 7348 // Scalarize an interleave group of address loads. 7349 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7350 if (Instruction *Member = Group->getMember(I)) 7351 setWideningDecision( 7352 Member, VF, CM_Scalarize, 7353 (VF.getKnownMinValue() * 7354 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7355 } 7356 } 7357 } else 7358 // Make sure I gets scalarized and a cost estimate without 7359 // scalarization overhead. 7360 ForcedScalars[VF].insert(I); 7361 } 7362 } 7363 7364 InstructionCost 7365 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7366 Type *&VectorTy) { 7367 Type *RetTy = I->getType(); 7368 if (canTruncateToMinimalBitwidth(I, VF)) 7369 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7370 auto SE = PSE.getSE(); 7371 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7372 7373 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7374 ElementCount VF) -> bool { 7375 if (VF.isScalar()) 7376 return true; 7377 7378 auto Scalarized = InstsToScalarize.find(VF); 7379 assert(Scalarized != InstsToScalarize.end() && 7380 "VF not yet analyzed for scalarization profitability"); 7381 return !Scalarized->second.count(I) && 7382 llvm::all_of(I->users(), [&](User *U) { 7383 auto *UI = cast<Instruction>(U); 7384 return !Scalarized->second.count(UI); 7385 }); 7386 }; 7387 (void) hasSingleCopyAfterVectorization; 7388 7389 if (isScalarAfterVectorization(I, VF)) { 7390 // With the exception of GEPs and PHIs, after scalarization there should 7391 // only be one copy of the instruction generated in the loop. This is 7392 // because the VF is either 1, or any instructions that need scalarizing 7393 // have already been dealt with by the the time we get here. As a result, 7394 // it means we don't have to multiply the instruction cost by VF. 7395 assert(I->getOpcode() == Instruction::GetElementPtr || 7396 I->getOpcode() == Instruction::PHI || 7397 (I->getOpcode() == Instruction::BitCast && 7398 I->getType()->isPointerTy()) || 7399 hasSingleCopyAfterVectorization(I, VF)); 7400 VectorTy = RetTy; 7401 } else 7402 VectorTy = ToVectorTy(RetTy, VF); 7403 7404 // TODO: We need to estimate the cost of intrinsic calls. 7405 switch (I->getOpcode()) { 7406 case Instruction::GetElementPtr: 7407 // We mark this instruction as zero-cost because the cost of GEPs in 7408 // vectorized code depends on whether the corresponding memory instruction 7409 // is scalarized or not. Therefore, we handle GEPs with the memory 7410 // instruction cost. 7411 return 0; 7412 case Instruction::Br: { 7413 // In cases of scalarized and predicated instructions, there will be VF 7414 // predicated blocks in the vectorized loop. Each branch around these 7415 // blocks requires also an extract of its vector compare i1 element. 7416 bool ScalarPredicatedBB = false; 7417 BranchInst *BI = cast<BranchInst>(I); 7418 if (VF.isVector() && BI->isConditional() && 7419 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7420 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7421 ScalarPredicatedBB = true; 7422 7423 if (ScalarPredicatedBB) { 7424 // Not possible to scalarize scalable vector with predicated instructions. 7425 if (VF.isScalable()) 7426 return InstructionCost::getInvalid(); 7427 // Return cost for branches around scalarized and predicated blocks. 7428 auto *Vec_i1Ty = 7429 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7430 return ( 7431 TTI.getScalarizationOverhead( 7432 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7433 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7434 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7435 // The back-edge branch will remain, as will all scalar branches. 7436 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7437 else 7438 // This branch will be eliminated by if-conversion. 7439 return 0; 7440 // Note: We currently assume zero cost for an unconditional branch inside 7441 // a predicated block since it will become a fall-through, although we 7442 // may decide in the future to call TTI for all branches. 7443 } 7444 case Instruction::PHI: { 7445 auto *Phi = cast<PHINode>(I); 7446 7447 // First-order recurrences are replaced by vector shuffles inside the loop. 7448 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7449 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7450 return TTI.getShuffleCost( 7451 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7452 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7453 7454 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7455 // converted into select instructions. We require N - 1 selects per phi 7456 // node, where N is the number of incoming values. 7457 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7458 return (Phi->getNumIncomingValues() - 1) * 7459 TTI.getCmpSelInstrCost( 7460 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7461 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7462 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7463 7464 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7465 } 7466 case Instruction::UDiv: 7467 case Instruction::SDiv: 7468 case Instruction::URem: 7469 case Instruction::SRem: 7470 // If we have a predicated instruction, it may not be executed for each 7471 // vector lane. Get the scalarization cost and scale this amount by the 7472 // probability of executing the predicated block. If the instruction is not 7473 // predicated, we fall through to the next case. 7474 if (VF.isVector() && isScalarWithPredication(I)) { 7475 InstructionCost Cost = 0; 7476 7477 // These instructions have a non-void type, so account for the phi nodes 7478 // that we will create. This cost is likely to be zero. The phi node 7479 // cost, if any, should be scaled by the block probability because it 7480 // models a copy at the end of each predicated block. 7481 Cost += VF.getKnownMinValue() * 7482 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7483 7484 // The cost of the non-predicated instruction. 7485 Cost += VF.getKnownMinValue() * 7486 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7487 7488 // The cost of insertelement and extractelement instructions needed for 7489 // scalarization. 7490 Cost += getScalarizationOverhead(I, VF); 7491 7492 // Scale the cost by the probability of executing the predicated blocks. 7493 // This assumes the predicated block for each vector lane is equally 7494 // likely. 7495 return Cost / getReciprocalPredBlockProb(); 7496 } 7497 LLVM_FALLTHROUGH; 7498 case Instruction::Add: 7499 case Instruction::FAdd: 7500 case Instruction::Sub: 7501 case Instruction::FSub: 7502 case Instruction::Mul: 7503 case Instruction::FMul: 7504 case Instruction::FDiv: 7505 case Instruction::FRem: 7506 case Instruction::Shl: 7507 case Instruction::LShr: 7508 case Instruction::AShr: 7509 case Instruction::And: 7510 case Instruction::Or: 7511 case Instruction::Xor: { 7512 // Since we will replace the stride by 1 the multiplication should go away. 7513 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7514 return 0; 7515 7516 // Detect reduction patterns 7517 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7518 return *RedCost; 7519 7520 // Certain instructions can be cheaper to vectorize if they have a constant 7521 // second vector operand. One example of this are shifts on x86. 7522 Value *Op2 = I->getOperand(1); 7523 TargetTransformInfo::OperandValueProperties Op2VP; 7524 TargetTransformInfo::OperandValueKind Op2VK = 7525 TTI.getOperandInfo(Op2, Op2VP); 7526 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7527 Op2VK = TargetTransformInfo::OK_UniformValue; 7528 7529 SmallVector<const Value *, 4> Operands(I->operand_values()); 7530 return TTI.getArithmeticInstrCost( 7531 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7532 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7533 } 7534 case Instruction::FNeg: { 7535 return TTI.getArithmeticInstrCost( 7536 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7537 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7538 TargetTransformInfo::OP_None, I->getOperand(0), I); 7539 } 7540 case Instruction::Select: { 7541 SelectInst *SI = cast<SelectInst>(I); 7542 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7543 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7544 7545 const Value *Op0, *Op1; 7546 using namespace llvm::PatternMatch; 7547 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7548 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7549 // select x, y, false --> x & y 7550 // select x, true, y --> x | y 7551 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7552 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7553 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7554 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7555 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7556 Op1->getType()->getScalarSizeInBits() == 1); 7557 7558 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7559 return TTI.getArithmeticInstrCost( 7560 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy, 7561 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7562 } 7563 7564 Type *CondTy = SI->getCondition()->getType(); 7565 if (!ScalarCond) 7566 CondTy = VectorType::get(CondTy, VF); 7567 7568 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7569 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7570 Pred = Cmp->getPredicate(); 7571 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7572 CostKind, I); 7573 } 7574 case Instruction::ICmp: 7575 case Instruction::FCmp: { 7576 Type *ValTy = I->getOperand(0)->getType(); 7577 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7578 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7579 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7580 VectorTy = ToVectorTy(ValTy, VF); 7581 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7582 cast<CmpInst>(I)->getPredicate(), CostKind, 7583 I); 7584 } 7585 case Instruction::Store: 7586 case Instruction::Load: { 7587 ElementCount Width = VF; 7588 if (Width.isVector()) { 7589 InstWidening Decision = getWideningDecision(I, Width); 7590 assert(Decision != CM_Unknown && 7591 "CM decision should be taken at this point"); 7592 if (Decision == CM_Scalarize) 7593 Width = ElementCount::getFixed(1); 7594 } 7595 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7596 return getMemoryInstructionCost(I, VF); 7597 } 7598 case Instruction::BitCast: 7599 if (I->getType()->isPointerTy()) 7600 return 0; 7601 LLVM_FALLTHROUGH; 7602 case Instruction::ZExt: 7603 case Instruction::SExt: 7604 case Instruction::FPToUI: 7605 case Instruction::FPToSI: 7606 case Instruction::FPExt: 7607 case Instruction::PtrToInt: 7608 case Instruction::IntToPtr: 7609 case Instruction::SIToFP: 7610 case Instruction::UIToFP: 7611 case Instruction::Trunc: 7612 case Instruction::FPTrunc: { 7613 // Computes the CastContextHint from a Load/Store instruction. 7614 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7615 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7616 "Expected a load or a store!"); 7617 7618 if (VF.isScalar() || !TheLoop->contains(I)) 7619 return TTI::CastContextHint::Normal; 7620 7621 switch (getWideningDecision(I, VF)) { 7622 case LoopVectorizationCostModel::CM_GatherScatter: 7623 return TTI::CastContextHint::GatherScatter; 7624 case LoopVectorizationCostModel::CM_Interleave: 7625 return TTI::CastContextHint::Interleave; 7626 case LoopVectorizationCostModel::CM_Scalarize: 7627 case LoopVectorizationCostModel::CM_Widen: 7628 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7629 : TTI::CastContextHint::Normal; 7630 case LoopVectorizationCostModel::CM_Widen_Reverse: 7631 return TTI::CastContextHint::Reversed; 7632 case LoopVectorizationCostModel::CM_Unknown: 7633 llvm_unreachable("Instr did not go through cost modelling?"); 7634 } 7635 7636 llvm_unreachable("Unhandled case!"); 7637 }; 7638 7639 unsigned Opcode = I->getOpcode(); 7640 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7641 // For Trunc, the context is the only user, which must be a StoreInst. 7642 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7643 if (I->hasOneUse()) 7644 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7645 CCH = ComputeCCH(Store); 7646 } 7647 // For Z/Sext, the context is the operand, which must be a LoadInst. 7648 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7649 Opcode == Instruction::FPExt) { 7650 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7651 CCH = ComputeCCH(Load); 7652 } 7653 7654 // We optimize the truncation of induction variables having constant 7655 // integer steps. The cost of these truncations is the same as the scalar 7656 // operation. 7657 if (isOptimizableIVTruncate(I, VF)) { 7658 auto *Trunc = cast<TruncInst>(I); 7659 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7660 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7661 } 7662 7663 // Detect reduction patterns 7664 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7665 return *RedCost; 7666 7667 Type *SrcScalarTy = I->getOperand(0)->getType(); 7668 Type *SrcVecTy = 7669 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7670 if (canTruncateToMinimalBitwidth(I, VF)) { 7671 // This cast is going to be shrunk. This may remove the cast or it might 7672 // turn it into slightly different cast. For example, if MinBW == 16, 7673 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7674 // 7675 // Calculate the modified src and dest types. 7676 Type *MinVecTy = VectorTy; 7677 if (Opcode == Instruction::Trunc) { 7678 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7679 VectorTy = 7680 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7681 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7682 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7683 VectorTy = 7684 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7685 } 7686 } 7687 7688 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7689 } 7690 case Instruction::Call: { 7691 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7692 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7693 return *RedCost; 7694 bool NeedToScalarize; 7695 CallInst *CI = cast<CallInst>(I); 7696 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7697 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7698 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7699 return std::min(CallCost, IntrinsicCost); 7700 } 7701 return CallCost; 7702 } 7703 case Instruction::ExtractValue: 7704 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7705 case Instruction::Alloca: 7706 // We cannot easily widen alloca to a scalable alloca, as 7707 // the result would need to be a vector of pointers. 7708 if (VF.isScalable()) 7709 return InstructionCost::getInvalid(); 7710 LLVM_FALLTHROUGH; 7711 default: 7712 // This opcode is unknown. Assume that it is the same as 'mul'. 7713 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7714 } // end of switch. 7715 } 7716 7717 char LoopVectorize::ID = 0; 7718 7719 static const char lv_name[] = "Loop Vectorization"; 7720 7721 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7722 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7723 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7724 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7725 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7726 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7727 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7728 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7729 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7730 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7731 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7732 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7733 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7734 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7735 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7736 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7737 7738 namespace llvm { 7739 7740 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7741 7742 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7743 bool VectorizeOnlyWhenForced) { 7744 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7745 } 7746 7747 } // end namespace llvm 7748 7749 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7750 // Check if the pointer operand of a load or store instruction is 7751 // consecutive. 7752 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7753 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7754 return false; 7755 } 7756 7757 void LoopVectorizationCostModel::collectValuesToIgnore() { 7758 // Ignore ephemeral values. 7759 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7760 7761 // Ignore type-promoting instructions we identified during reduction 7762 // detection. 7763 for (auto &Reduction : Legal->getReductionVars()) { 7764 RecurrenceDescriptor &RedDes = Reduction.second; 7765 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7766 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7767 } 7768 // Ignore type-casting instructions we identified during induction 7769 // detection. 7770 for (auto &Induction : Legal->getInductionVars()) { 7771 InductionDescriptor &IndDes = Induction.second; 7772 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7773 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7774 } 7775 } 7776 7777 void LoopVectorizationCostModel::collectInLoopReductions() { 7778 for (auto &Reduction : Legal->getReductionVars()) { 7779 PHINode *Phi = Reduction.first; 7780 RecurrenceDescriptor &RdxDesc = Reduction.second; 7781 7782 // We don't collect reductions that are type promoted (yet). 7783 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7784 continue; 7785 7786 // If the target would prefer this reduction to happen "in-loop", then we 7787 // want to record it as such. 7788 unsigned Opcode = RdxDesc.getOpcode(); 7789 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7790 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7791 TargetTransformInfo::ReductionFlags())) 7792 continue; 7793 7794 // Check that we can correctly put the reductions into the loop, by 7795 // finding the chain of operations that leads from the phi to the loop 7796 // exit value. 7797 SmallVector<Instruction *, 4> ReductionOperations = 7798 RdxDesc.getReductionOpChain(Phi, TheLoop); 7799 bool InLoop = !ReductionOperations.empty(); 7800 if (InLoop) { 7801 InLoopReductionChains[Phi] = ReductionOperations; 7802 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7803 Instruction *LastChain = Phi; 7804 for (auto *I : ReductionOperations) { 7805 InLoopReductionImmediateChains[I] = LastChain; 7806 LastChain = I; 7807 } 7808 } 7809 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7810 << " reduction for phi: " << *Phi << "\n"); 7811 } 7812 } 7813 7814 // TODO: we could return a pair of values that specify the max VF and 7815 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7816 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7817 // doesn't have a cost model that can choose which plan to execute if 7818 // more than one is generated. 7819 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7820 LoopVectorizationCostModel &CM) { 7821 unsigned WidestType; 7822 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7823 return WidestVectorRegBits / WidestType; 7824 } 7825 7826 VectorizationFactor 7827 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7828 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7829 ElementCount VF = UserVF; 7830 // Outer loop handling: They may require CFG and instruction level 7831 // transformations before even evaluating whether vectorization is profitable. 7832 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7833 // the vectorization pipeline. 7834 if (!OrigLoop->isInnermost()) { 7835 // If the user doesn't provide a vectorization factor, determine a 7836 // reasonable one. 7837 if (UserVF.isZero()) { 7838 VF = ElementCount::getFixed(determineVPlanVF( 7839 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7840 .getFixedSize(), 7841 CM)); 7842 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7843 7844 // Make sure we have a VF > 1 for stress testing. 7845 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7846 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7847 << "overriding computed VF.\n"); 7848 VF = ElementCount::getFixed(4); 7849 } 7850 } 7851 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7852 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7853 "VF needs to be a power of two"); 7854 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7855 << "VF " << VF << " to build VPlans.\n"); 7856 buildVPlans(VF, VF); 7857 7858 // For VPlan build stress testing, we bail out after VPlan construction. 7859 if (VPlanBuildStressTest) 7860 return VectorizationFactor::Disabled(); 7861 7862 return {VF, 0 /*Cost*/}; 7863 } 7864 7865 LLVM_DEBUG( 7866 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7867 "VPlan-native path.\n"); 7868 return VectorizationFactor::Disabled(); 7869 } 7870 7871 Optional<VectorizationFactor> 7872 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7873 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7874 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7875 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7876 return None; 7877 7878 // Invalidate interleave groups if all blocks of loop will be predicated. 7879 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7880 !useMaskedInterleavedAccesses(*TTI)) { 7881 LLVM_DEBUG( 7882 dbgs() 7883 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7884 "which requires masked-interleaved support.\n"); 7885 if (CM.InterleaveInfo.invalidateGroups()) 7886 // Invalidating interleave groups also requires invalidating all decisions 7887 // based on them, which includes widening decisions and uniform and scalar 7888 // values. 7889 CM.invalidateCostModelingDecisions(); 7890 } 7891 7892 ElementCount MaxUserVF = 7893 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7894 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7895 if (!UserVF.isZero() && UserVFIsLegal) { 7896 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7897 "VF needs to be a power of two"); 7898 // Collect the instructions (and their associated costs) that will be more 7899 // profitable to scalarize. 7900 if (CM.selectUserVectorizationFactor(UserVF)) { 7901 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7902 CM.collectInLoopReductions(); 7903 buildVPlansWithVPRecipes(UserVF, UserVF); 7904 LLVM_DEBUG(printPlans(dbgs())); 7905 return {{UserVF, 0}}; 7906 } else 7907 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7908 "InvalidCost", ORE, OrigLoop); 7909 } 7910 7911 // Populate the set of Vectorization Factor Candidates. 7912 ElementCountSet VFCandidates; 7913 for (auto VF = ElementCount::getFixed(1); 7914 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7915 VFCandidates.insert(VF); 7916 for (auto VF = ElementCount::getScalable(1); 7917 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7918 VFCandidates.insert(VF); 7919 7920 for (const auto &VF : VFCandidates) { 7921 // Collect Uniform and Scalar instructions after vectorization with VF. 7922 CM.collectUniformsAndScalars(VF); 7923 7924 // Collect the instructions (and their associated costs) that will be more 7925 // profitable to scalarize. 7926 if (VF.isVector()) 7927 CM.collectInstsToScalarize(VF); 7928 } 7929 7930 CM.collectInLoopReductions(); 7931 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7932 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7933 7934 LLVM_DEBUG(printPlans(dbgs())); 7935 if (!MaxFactors.hasVector()) 7936 return VectorizationFactor::Disabled(); 7937 7938 // Select the optimal vectorization factor. 7939 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7940 7941 // Check if it is profitable to vectorize with runtime checks. 7942 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7943 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7944 bool PragmaThresholdReached = 7945 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7946 bool ThresholdReached = 7947 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7948 if ((ThresholdReached && !Hints.allowReordering()) || 7949 PragmaThresholdReached) { 7950 ORE->emit([&]() { 7951 return OptimizationRemarkAnalysisAliasing( 7952 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7953 OrigLoop->getHeader()) 7954 << "loop not vectorized: cannot prove it is safe to reorder " 7955 "memory operations"; 7956 }); 7957 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7958 Hints.emitRemarkWithHints(); 7959 return VectorizationFactor::Disabled(); 7960 } 7961 } 7962 return SelectedVF; 7963 } 7964 7965 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7966 assert(count_if(VPlans, 7967 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7968 1 && 7969 "Best VF has not a single VPlan."); 7970 7971 for (const VPlanPtr &Plan : VPlans) { 7972 if (Plan->hasVF(VF)) 7973 return *Plan.get(); 7974 } 7975 llvm_unreachable("No plan found!"); 7976 } 7977 7978 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7979 VPlan &BestVPlan, 7980 InnerLoopVectorizer &ILV, 7981 DominatorTree *DT) { 7982 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7983 << '\n'); 7984 7985 // Perform the actual loop transformation. 7986 7987 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7988 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7989 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7990 State.TripCount = ILV.getOrCreateTripCount(nullptr); 7991 State.CanonicalIV = ILV.Induction; 7992 ILV.collectPoisonGeneratingRecipes(State); 7993 7994 ILV.printDebugTracesAtStart(); 7995 7996 //===------------------------------------------------===// 7997 // 7998 // Notice: any optimization or new instruction that go 7999 // into the code below should also be implemented in 8000 // the cost-model. 8001 // 8002 //===------------------------------------------------===// 8003 8004 // 2. Copy and widen instructions from the old loop into the new loop. 8005 BestVPlan.execute(&State); 8006 8007 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8008 // predication, updating analyses. 8009 ILV.fixVectorizedLoop(State); 8010 8011 ILV.printDebugTracesAtEnd(); 8012 } 8013 8014 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 8015 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 8016 for (const auto &Plan : VPlans) 8017 if (PrintVPlansInDotFormat) 8018 Plan->printDOT(O); 8019 else 8020 Plan->print(O); 8021 } 8022 #endif 8023 8024 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 8025 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 8026 8027 // We create new control-flow for the vectorized loop, so the original exit 8028 // conditions will be dead after vectorization if it's only used by the 8029 // terminator 8030 SmallVector<BasicBlock*> ExitingBlocks; 8031 OrigLoop->getExitingBlocks(ExitingBlocks); 8032 for (auto *BB : ExitingBlocks) { 8033 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 8034 if (!Cmp || !Cmp->hasOneUse()) 8035 continue; 8036 8037 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8038 if (!DeadInstructions.insert(Cmp).second) 8039 continue; 8040 8041 // The operands of the icmp is often a dead trunc, used by IndUpdate. 8042 // TODO: can recurse through operands in general 8043 for (Value *Op : Cmp->operands()) { 8044 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8045 DeadInstructions.insert(cast<Instruction>(Op)); 8046 } 8047 } 8048 8049 // We create new "steps" for induction variable updates to which the original 8050 // induction variables map. An original update instruction will be dead if 8051 // all its users except the induction variable are dead. 8052 auto *Latch = OrigLoop->getLoopLatch(); 8053 for (auto &Induction : Legal->getInductionVars()) { 8054 PHINode *Ind = Induction.first; 8055 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8056 8057 // If the tail is to be folded by masking, the primary induction variable, 8058 // if exists, isn't dead: it will be used for masking. Don't kill it. 8059 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8060 continue; 8061 8062 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8063 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8064 })) 8065 DeadInstructions.insert(IndUpdate); 8066 8067 // We record as "Dead" also the type-casting instructions we had identified 8068 // during induction analysis. We don't need any handling for them in the 8069 // vectorized loop because we have proven that, under a proper runtime 8070 // test guarding the vectorized loop, the value of the phi, and the casted 8071 // value of the phi, are the same. The last instruction in this casting chain 8072 // will get its scalar/vector/widened def from the scalar/vector/widened def 8073 // of the respective phi node. Any other casts in the induction def-use chain 8074 // have no other uses outside the phi update chain, and will be ignored. 8075 InductionDescriptor &IndDes = Induction.second; 8076 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8077 DeadInstructions.insert(Casts.begin(), Casts.end()); 8078 } 8079 } 8080 8081 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 8082 8083 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8084 8085 Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx, 8086 Value *Step, 8087 Instruction::BinaryOps BinOp) { 8088 // When unrolling and the VF is 1, we only need to add a simple scalar. 8089 Type *Ty = Val->getType(); 8090 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8091 8092 if (Ty->isFloatingPointTy()) { 8093 // Floating-point operations inherit FMF via the builder's flags. 8094 Value *MulOp = Builder.CreateFMul(StartIdx, Step); 8095 return Builder.CreateBinOp(BinOp, Val, MulOp); 8096 } 8097 return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction"); 8098 } 8099 8100 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8101 SmallVector<Metadata *, 4> MDs; 8102 // Reserve first location for self reference to the LoopID metadata node. 8103 MDs.push_back(nullptr); 8104 bool IsUnrollMetadata = false; 8105 MDNode *LoopID = L->getLoopID(); 8106 if (LoopID) { 8107 // First find existing loop unrolling disable metadata. 8108 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8109 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8110 if (MD) { 8111 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8112 IsUnrollMetadata = 8113 S && S->getString().startswith("llvm.loop.unroll.disable"); 8114 } 8115 MDs.push_back(LoopID->getOperand(i)); 8116 } 8117 } 8118 8119 if (!IsUnrollMetadata) { 8120 // Add runtime unroll disable metadata. 8121 LLVMContext &Context = L->getHeader()->getContext(); 8122 SmallVector<Metadata *, 1> DisableOperands; 8123 DisableOperands.push_back( 8124 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8125 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8126 MDs.push_back(DisableNode); 8127 MDNode *NewLoopID = MDNode::get(Context, MDs); 8128 // Set operand 0 to refer to the loop id itself. 8129 NewLoopID->replaceOperandWith(0, NewLoopID); 8130 L->setLoopID(NewLoopID); 8131 } 8132 } 8133 8134 //===--------------------------------------------------------------------===// 8135 // EpilogueVectorizerMainLoop 8136 //===--------------------------------------------------------------------===// 8137 8138 /// This function is partially responsible for generating the control flow 8139 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8140 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8141 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8142 Loop *Lp = createVectorLoopSkeleton(""); 8143 8144 // Generate the code to check the minimum iteration count of the vector 8145 // epilogue (see below). 8146 EPI.EpilogueIterationCountCheck = 8147 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8148 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8149 8150 // Generate the code to check any assumptions that we've made for SCEV 8151 // expressions. 8152 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8153 8154 // Generate the code that checks at runtime if arrays overlap. We put the 8155 // checks into a separate block to make the more common case of few elements 8156 // faster. 8157 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8158 8159 // Generate the iteration count check for the main loop, *after* the check 8160 // for the epilogue loop, so that the path-length is shorter for the case 8161 // that goes directly through the vector epilogue. The longer-path length for 8162 // the main loop is compensated for, by the gain from vectorizing the larger 8163 // trip count. Note: the branch will get updated later on when we vectorize 8164 // the epilogue. 8165 EPI.MainLoopIterationCountCheck = 8166 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8167 8168 // Generate the induction variable. 8169 OldInduction = Legal->getPrimaryInduction(); 8170 Type *IdxTy = Legal->getWidestInductionType(); 8171 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8172 8173 IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt()); 8174 Value *Step = getRuntimeVF(B, IdxTy, VF * UF); 8175 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8176 EPI.VectorTripCount = CountRoundDown; 8177 Induction = 8178 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8179 getDebugLocFromInstOrOperands(OldInduction)); 8180 8181 // Skip induction resume value creation here because they will be created in 8182 // the second pass. If we created them here, they wouldn't be used anyway, 8183 // because the vplan in the second pass still contains the inductions from the 8184 // original loop. 8185 8186 return completeLoopSkeleton(Lp, OrigLoopID); 8187 } 8188 8189 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8190 LLVM_DEBUG({ 8191 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8192 << "Main Loop VF:" << EPI.MainLoopVF 8193 << ", Main Loop UF:" << EPI.MainLoopUF 8194 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8195 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8196 }); 8197 } 8198 8199 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8200 DEBUG_WITH_TYPE(VerboseDebug, { 8201 dbgs() << "intermediate fn:\n" 8202 << *OrigLoop->getHeader()->getParent() << "\n"; 8203 }); 8204 } 8205 8206 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8207 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8208 assert(L && "Expected valid Loop."); 8209 assert(Bypass && "Expected valid bypass basic block."); 8210 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8211 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8212 Value *Count = getOrCreateTripCount(L); 8213 // Reuse existing vector loop preheader for TC checks. 8214 // Note that new preheader block is generated for vector loop. 8215 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8216 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8217 8218 // Generate code to check if the loop's trip count is less than VF * UF of the 8219 // main vector loop. 8220 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8221 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8222 8223 Value *CheckMinIters = Builder.CreateICmp( 8224 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 8225 "min.iters.check"); 8226 8227 if (!ForEpilogue) 8228 TCCheckBlock->setName("vector.main.loop.iter.check"); 8229 8230 // Create new preheader for vector loop. 8231 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8232 DT, LI, nullptr, "vector.ph"); 8233 8234 if (ForEpilogue) { 8235 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8236 DT->getNode(Bypass)->getIDom()) && 8237 "TC check is expected to dominate Bypass"); 8238 8239 // Update dominator for Bypass & LoopExit. 8240 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8241 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8242 // For loops with multiple exits, there's no edge from the middle block 8243 // to exit blocks (as the epilogue must run) and thus no need to update 8244 // the immediate dominator of the exit blocks. 8245 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8246 8247 LoopBypassBlocks.push_back(TCCheckBlock); 8248 8249 // Save the trip count so we don't have to regenerate it in the 8250 // vec.epilog.iter.check. This is safe to do because the trip count 8251 // generated here dominates the vector epilog iter check. 8252 EPI.TripCount = Count; 8253 } 8254 8255 ReplaceInstWithInst( 8256 TCCheckBlock->getTerminator(), 8257 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8258 8259 return TCCheckBlock; 8260 } 8261 8262 //===--------------------------------------------------------------------===// 8263 // EpilogueVectorizerEpilogueLoop 8264 //===--------------------------------------------------------------------===// 8265 8266 /// This function is partially responsible for generating the control flow 8267 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8268 BasicBlock * 8269 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8270 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8271 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8272 8273 // Now, compare the remaining count and if there aren't enough iterations to 8274 // execute the vectorized epilogue skip to the scalar part. 8275 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8276 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8277 LoopVectorPreHeader = 8278 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8279 LI, nullptr, "vec.epilog.ph"); 8280 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8281 VecEpilogueIterationCountCheck); 8282 8283 // Adjust the control flow taking the state info from the main loop 8284 // vectorization into account. 8285 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8286 "expected this to be saved from the previous pass."); 8287 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8288 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8289 8290 DT->changeImmediateDominator(LoopVectorPreHeader, 8291 EPI.MainLoopIterationCountCheck); 8292 8293 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8294 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8295 8296 if (EPI.SCEVSafetyCheck) 8297 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8298 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8299 if (EPI.MemSafetyCheck) 8300 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8301 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8302 8303 DT->changeImmediateDominator( 8304 VecEpilogueIterationCountCheck, 8305 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8306 8307 DT->changeImmediateDominator(LoopScalarPreHeader, 8308 EPI.EpilogueIterationCountCheck); 8309 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8310 // If there is an epilogue which must run, there's no edge from the 8311 // middle block to exit blocks and thus no need to update the immediate 8312 // dominator of the exit blocks. 8313 DT->changeImmediateDominator(LoopExitBlock, 8314 EPI.EpilogueIterationCountCheck); 8315 8316 // Keep track of bypass blocks, as they feed start values to the induction 8317 // phis in the scalar loop preheader. 8318 if (EPI.SCEVSafetyCheck) 8319 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8320 if (EPI.MemSafetyCheck) 8321 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8322 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8323 8324 // Generate a resume induction for the vector epilogue and put it in the 8325 // vector epilogue preheader 8326 Type *IdxTy = Legal->getWidestInductionType(); 8327 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8328 LoopVectorPreHeader->getFirstNonPHI()); 8329 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8330 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8331 EPI.MainLoopIterationCountCheck); 8332 8333 // Generate the induction variable. 8334 OldInduction = Legal->getPrimaryInduction(); 8335 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8336 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8337 Value *StartIdx = EPResumeVal; 8338 Induction = 8339 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8340 getDebugLocFromInstOrOperands(OldInduction)); 8341 8342 // Generate induction resume values. These variables save the new starting 8343 // indexes for the scalar loop. They are used to test if there are any tail 8344 // iterations left once the vector loop has completed. 8345 // Note that when the vectorized epilogue is skipped due to iteration count 8346 // check, then the resume value for the induction variable comes from 8347 // the trip count of the main vector loop, hence passing the AdditionalBypass 8348 // argument. 8349 createInductionResumeValues(Lp, CountRoundDown, 8350 {VecEpilogueIterationCountCheck, 8351 EPI.VectorTripCount} /* AdditionalBypass */); 8352 8353 AddRuntimeUnrollDisableMetaData(Lp); 8354 return completeLoopSkeleton(Lp, OrigLoopID); 8355 } 8356 8357 BasicBlock * 8358 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( 8359 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) { 8360 8361 assert(EPI.TripCount && 8362 "Expected trip count to have been safed in the first pass."); 8363 assert( 8364 (!isa<Instruction>(EPI.TripCount) || 8365 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && 8366 "saved trip count does not dominate insertion point."); 8367 Value *TC = EPI.TripCount; 8368 IRBuilder<> Builder(Insert->getTerminator()); 8369 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); 8370 8371 // Generate code to check if the loop's trip count is less than VF * UF of the 8372 // vector epilogue loop. 8373 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ? 8374 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8375 8376 Value *CheckMinIters = 8377 Builder.CreateICmp(P, Count, 8378 createStepForVF(Builder, Count->getType(), 8379 EPI.EpilogueVF, EPI.EpilogueUF), 8380 "min.epilog.iters.check"); 8381 8382 ReplaceInstWithInst( 8383 Insert->getTerminator(), 8384 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8385 8386 LoopBypassBlocks.push_back(Insert); 8387 return Insert; 8388 } 8389 8390 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8391 LLVM_DEBUG({ 8392 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8393 << "Epilogue Loop VF:" << EPI.EpilogueVF 8394 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8395 }); 8396 } 8397 8398 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8399 DEBUG_WITH_TYPE(VerboseDebug, { 8400 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 8401 }); 8402 } 8403 8404 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8405 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8406 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8407 bool PredicateAtRangeStart = Predicate(Range.Start); 8408 8409 for (ElementCount TmpVF = Range.Start * 2; 8410 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8411 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8412 Range.End = TmpVF; 8413 break; 8414 } 8415 8416 return PredicateAtRangeStart; 8417 } 8418 8419 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8420 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8421 /// of VF's starting at a given VF and extending it as much as possible. Each 8422 /// vectorization decision can potentially shorten this sub-range during 8423 /// buildVPlan(). 8424 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8425 ElementCount MaxVF) { 8426 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8427 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8428 VFRange SubRange = {VF, MaxVFPlusOne}; 8429 VPlans.push_back(buildVPlan(SubRange)); 8430 VF = SubRange.End; 8431 } 8432 } 8433 8434 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8435 VPlanPtr &Plan) { 8436 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8437 8438 // Look for cached value. 8439 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8440 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8441 if (ECEntryIt != EdgeMaskCache.end()) 8442 return ECEntryIt->second; 8443 8444 VPValue *SrcMask = createBlockInMask(Src, Plan); 8445 8446 // The terminator has to be a branch inst! 8447 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8448 assert(BI && "Unexpected terminator found"); 8449 8450 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8451 return EdgeMaskCache[Edge] = SrcMask; 8452 8453 // If source is an exiting block, we know the exit edge is dynamically dead 8454 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8455 // adding uses of an otherwise potentially dead instruction. 8456 if (OrigLoop->isLoopExiting(Src)) 8457 return EdgeMaskCache[Edge] = SrcMask; 8458 8459 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8460 assert(EdgeMask && "No Edge Mask found for condition"); 8461 8462 if (BI->getSuccessor(0) != Dst) 8463 EdgeMask = Builder.createNot(EdgeMask); 8464 8465 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8466 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8467 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8468 // The select version does not introduce new UB if SrcMask is false and 8469 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8470 VPValue *False = Plan->getOrAddVPValue( 8471 ConstantInt::getFalse(BI->getCondition()->getType())); 8472 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8473 } 8474 8475 return EdgeMaskCache[Edge] = EdgeMask; 8476 } 8477 8478 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8479 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8480 8481 // Look for cached value. 8482 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8483 if (BCEntryIt != BlockMaskCache.end()) 8484 return BCEntryIt->second; 8485 8486 // All-one mask is modelled as no-mask following the convention for masked 8487 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8488 VPValue *BlockMask = nullptr; 8489 8490 if (OrigLoop->getHeader() == BB) { 8491 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8492 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8493 8494 // Create the block in mask as the first non-phi instruction in the block. 8495 VPBuilder::InsertPointGuard Guard(Builder); 8496 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8497 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8498 8499 // Introduce the early-exit compare IV <= BTC to form header block mask. 8500 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8501 // Start by constructing the desired canonical IV. 8502 VPValue *IV = nullptr; 8503 if (Legal->getPrimaryInduction()) 8504 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8505 else { 8506 auto *IVRecipe = new VPWidenCanonicalIVRecipe(); 8507 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8508 IV = IVRecipe; 8509 } 8510 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8511 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8512 8513 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8514 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8515 // as a second argument, we only pass the IV here and extract the 8516 // tripcount from the transform state where codegen of the VP instructions 8517 // happen. 8518 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8519 } else { 8520 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8521 } 8522 return BlockMaskCache[BB] = BlockMask; 8523 } 8524 8525 // This is the block mask. We OR all incoming edges. 8526 for (auto *Predecessor : predecessors(BB)) { 8527 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8528 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8529 return BlockMaskCache[BB] = EdgeMask; 8530 8531 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8532 BlockMask = EdgeMask; 8533 continue; 8534 } 8535 8536 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8537 } 8538 8539 return BlockMaskCache[BB] = BlockMask; 8540 } 8541 8542 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8543 ArrayRef<VPValue *> Operands, 8544 VFRange &Range, 8545 VPlanPtr &Plan) { 8546 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8547 "Must be called with either a load or store"); 8548 8549 auto willWiden = [&](ElementCount VF) -> bool { 8550 if (VF.isScalar()) 8551 return false; 8552 LoopVectorizationCostModel::InstWidening Decision = 8553 CM.getWideningDecision(I, VF); 8554 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8555 "CM decision should be taken at this point."); 8556 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8557 return true; 8558 if (CM.isScalarAfterVectorization(I, VF) || 8559 CM.isProfitableToScalarize(I, VF)) 8560 return false; 8561 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8562 }; 8563 8564 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8565 return nullptr; 8566 8567 VPValue *Mask = nullptr; 8568 if (Legal->isMaskRequired(I)) 8569 Mask = createBlockInMask(I->getParent(), Plan); 8570 8571 // Determine if the pointer operand of the access is either consecutive or 8572 // reverse consecutive. 8573 LoopVectorizationCostModel::InstWidening Decision = 8574 CM.getWideningDecision(I, Range.Start); 8575 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8576 bool Consecutive = 8577 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8578 8579 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8580 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8581 Consecutive, Reverse); 8582 8583 StoreInst *Store = cast<StoreInst>(I); 8584 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8585 Mask, Consecutive, Reverse); 8586 } 8587 8588 VPWidenIntOrFpInductionRecipe * 8589 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8590 ArrayRef<VPValue *> Operands) const { 8591 // Check if this is an integer or fp induction. If so, build the recipe that 8592 // produces its scalar and vector values. 8593 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8594 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8595 II.getKind() == InductionDescriptor::IK_FpInduction) { 8596 assert(II.getStartValue() == 8597 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8598 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8599 return new VPWidenIntOrFpInductionRecipe( 8600 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8601 } 8602 8603 return nullptr; 8604 } 8605 8606 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8607 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8608 VPlan &Plan) const { 8609 // Optimize the special case where the source is a constant integer 8610 // induction variable. Notice that we can only optimize the 'trunc' case 8611 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8612 // (c) other casts depend on pointer size. 8613 8614 // Determine whether \p K is a truncation based on an induction variable that 8615 // can be optimized. 8616 auto isOptimizableIVTruncate = 8617 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8618 return [=](ElementCount VF) -> bool { 8619 return CM.isOptimizableIVTruncate(K, VF); 8620 }; 8621 }; 8622 8623 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8624 isOptimizableIVTruncate(I), Range)) { 8625 8626 InductionDescriptor II = 8627 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8628 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8629 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8630 Start, I); 8631 } 8632 return nullptr; 8633 } 8634 8635 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8636 ArrayRef<VPValue *> Operands, 8637 VPlanPtr &Plan) { 8638 // If all incoming values are equal, the incoming VPValue can be used directly 8639 // instead of creating a new VPBlendRecipe. 8640 VPValue *FirstIncoming = Operands[0]; 8641 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8642 return FirstIncoming == Inc; 8643 })) { 8644 return Operands[0]; 8645 } 8646 8647 // We know that all PHIs in non-header blocks are converted into selects, so 8648 // we don't have to worry about the insertion order and we can just use the 8649 // builder. At this point we generate the predication tree. There may be 8650 // duplications since this is a simple recursive scan, but future 8651 // optimizations will clean it up. 8652 SmallVector<VPValue *, 2> OperandsWithMask; 8653 unsigned NumIncoming = Phi->getNumIncomingValues(); 8654 8655 for (unsigned In = 0; In < NumIncoming; In++) { 8656 VPValue *EdgeMask = 8657 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8658 assert((EdgeMask || NumIncoming == 1) && 8659 "Multiple predecessors with one having a full mask"); 8660 OperandsWithMask.push_back(Operands[In]); 8661 if (EdgeMask) 8662 OperandsWithMask.push_back(EdgeMask); 8663 } 8664 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8665 } 8666 8667 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8668 ArrayRef<VPValue *> Operands, 8669 VFRange &Range) const { 8670 8671 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8672 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 8673 Range); 8674 8675 if (IsPredicated) 8676 return nullptr; 8677 8678 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8679 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8680 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8681 ID == Intrinsic::pseudoprobe || 8682 ID == Intrinsic::experimental_noalias_scope_decl)) 8683 return nullptr; 8684 8685 auto willWiden = [&](ElementCount VF) -> bool { 8686 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8687 // The following case may be scalarized depending on the VF. 8688 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8689 // version of the instruction. 8690 // Is it beneficial to perform intrinsic call compared to lib call? 8691 bool NeedToScalarize = false; 8692 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8693 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8694 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8695 return UseVectorIntrinsic || !NeedToScalarize; 8696 }; 8697 8698 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8699 return nullptr; 8700 8701 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8702 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8703 } 8704 8705 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8706 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8707 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8708 // Instruction should be widened, unless it is scalar after vectorization, 8709 // scalarization is profitable or it is predicated. 8710 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8711 return CM.isScalarAfterVectorization(I, VF) || 8712 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8713 }; 8714 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8715 Range); 8716 } 8717 8718 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8719 ArrayRef<VPValue *> Operands) const { 8720 auto IsVectorizableOpcode = [](unsigned Opcode) { 8721 switch (Opcode) { 8722 case Instruction::Add: 8723 case Instruction::And: 8724 case Instruction::AShr: 8725 case Instruction::BitCast: 8726 case Instruction::FAdd: 8727 case Instruction::FCmp: 8728 case Instruction::FDiv: 8729 case Instruction::FMul: 8730 case Instruction::FNeg: 8731 case Instruction::FPExt: 8732 case Instruction::FPToSI: 8733 case Instruction::FPToUI: 8734 case Instruction::FPTrunc: 8735 case Instruction::FRem: 8736 case Instruction::FSub: 8737 case Instruction::ICmp: 8738 case Instruction::IntToPtr: 8739 case Instruction::LShr: 8740 case Instruction::Mul: 8741 case Instruction::Or: 8742 case Instruction::PtrToInt: 8743 case Instruction::SDiv: 8744 case Instruction::Select: 8745 case Instruction::SExt: 8746 case Instruction::Shl: 8747 case Instruction::SIToFP: 8748 case Instruction::SRem: 8749 case Instruction::Sub: 8750 case Instruction::Trunc: 8751 case Instruction::UDiv: 8752 case Instruction::UIToFP: 8753 case Instruction::URem: 8754 case Instruction::Xor: 8755 case Instruction::ZExt: 8756 return true; 8757 } 8758 return false; 8759 }; 8760 8761 if (!IsVectorizableOpcode(I->getOpcode())) 8762 return nullptr; 8763 8764 // Success: widen this instruction. 8765 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8766 } 8767 8768 void VPRecipeBuilder::fixHeaderPhis() { 8769 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8770 for (VPWidenPHIRecipe *R : PhisToFix) { 8771 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8772 VPRecipeBase *IncR = 8773 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8774 R->addOperand(IncR->getVPSingleValue()); 8775 } 8776 } 8777 8778 VPBasicBlock *VPRecipeBuilder::handleReplication( 8779 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8780 VPlanPtr &Plan) { 8781 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8782 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8783 Range); 8784 8785 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8786 [&](ElementCount VF) { return CM.isPredicatedInst(I, IsUniform); }, 8787 Range); 8788 8789 // Even if the instruction is not marked as uniform, there are certain 8790 // intrinsic calls that can be effectively treated as such, so we check for 8791 // them here. Conservatively, we only do this for scalable vectors, since 8792 // for fixed-width VFs we can always fall back on full scalarization. 8793 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8794 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8795 case Intrinsic::assume: 8796 case Intrinsic::lifetime_start: 8797 case Intrinsic::lifetime_end: 8798 // For scalable vectors if one of the operands is variant then we still 8799 // want to mark as uniform, which will generate one instruction for just 8800 // the first lane of the vector. We can't scalarize the call in the same 8801 // way as for fixed-width vectors because we don't know how many lanes 8802 // there are. 8803 // 8804 // The reasons for doing it this way for scalable vectors are: 8805 // 1. For the assume intrinsic generating the instruction for the first 8806 // lane is still be better than not generating any at all. For 8807 // example, the input may be a splat across all lanes. 8808 // 2. For the lifetime start/end intrinsics the pointer operand only 8809 // does anything useful when the input comes from a stack object, 8810 // which suggests it should always be uniform. For non-stack objects 8811 // the effect is to poison the object, which still allows us to 8812 // remove the call. 8813 IsUniform = true; 8814 break; 8815 default: 8816 break; 8817 } 8818 } 8819 8820 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8821 IsUniform, IsPredicated); 8822 setRecipe(I, Recipe); 8823 Plan->addVPValue(I, Recipe); 8824 8825 // Find if I uses a predicated instruction. If so, it will use its scalar 8826 // value. Avoid hoisting the insert-element which packs the scalar value into 8827 // a vector value, as that happens iff all users use the vector value. 8828 for (VPValue *Op : Recipe->operands()) { 8829 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8830 if (!PredR) 8831 continue; 8832 auto *RepR = 8833 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8834 assert(RepR->isPredicated() && 8835 "expected Replicate recipe to be predicated"); 8836 RepR->setAlsoPack(false); 8837 } 8838 8839 // Finalize the recipe for Instr, first if it is not predicated. 8840 if (!IsPredicated) { 8841 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8842 VPBB->appendRecipe(Recipe); 8843 return VPBB; 8844 } 8845 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8846 assert(VPBB->getSuccessors().empty() && 8847 "VPBB has successors when handling predicated replication."); 8848 // Record predicated instructions for above packing optimizations. 8849 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8850 VPBlockUtils::insertBlockAfter(Region, VPBB); 8851 auto *RegSucc = new VPBasicBlock(); 8852 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8853 return RegSucc; 8854 } 8855 8856 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8857 VPRecipeBase *PredRecipe, 8858 VPlanPtr &Plan) { 8859 // Instructions marked for predication are replicated and placed under an 8860 // if-then construct to prevent side-effects. 8861 8862 // Generate recipes to compute the block mask for this region. 8863 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8864 8865 // Build the triangular if-then region. 8866 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8867 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8868 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8869 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8870 auto *PHIRecipe = Instr->getType()->isVoidTy() 8871 ? nullptr 8872 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8873 if (PHIRecipe) { 8874 Plan->removeVPValueFor(Instr); 8875 Plan->addVPValue(Instr, PHIRecipe); 8876 } 8877 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8878 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8879 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8880 8881 // Note: first set Entry as region entry and then connect successors starting 8882 // from it in order, to propagate the "parent" of each VPBasicBlock. 8883 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8884 VPBlockUtils::connectBlocks(Pred, Exit); 8885 8886 return Region; 8887 } 8888 8889 VPRecipeOrVPValueTy 8890 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8891 ArrayRef<VPValue *> Operands, 8892 VFRange &Range, VPlanPtr &Plan) { 8893 // First, check for specific widening recipes that deal with calls, memory 8894 // operations, inductions and Phi nodes. 8895 if (auto *CI = dyn_cast<CallInst>(Instr)) 8896 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8897 8898 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8899 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8900 8901 VPRecipeBase *Recipe; 8902 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8903 if (Phi->getParent() != OrigLoop->getHeader()) 8904 return tryToBlend(Phi, Operands, Plan); 8905 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 8906 return toVPRecipeResult(Recipe); 8907 8908 VPWidenPHIRecipe *PhiRecipe = nullptr; 8909 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8910 VPValue *StartV = Operands[0]; 8911 if (Legal->isReductionVariable(Phi)) { 8912 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 8913 assert(RdxDesc.getRecurrenceStartValue() == 8914 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8915 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8916 CM.isInLoopReduction(Phi), 8917 CM.useOrderedReductions(RdxDesc)); 8918 } else { 8919 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8920 } 8921 8922 // Record the incoming value from the backedge, so we can add the incoming 8923 // value from the backedge after all recipes have been created. 8924 recordRecipeOf(cast<Instruction>( 8925 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8926 PhisToFix.push_back(PhiRecipe); 8927 } else { 8928 // TODO: record start and backedge value for remaining pointer induction 8929 // phis. 8930 assert(Phi->getType()->isPointerTy() && 8931 "only pointer phis should be handled here"); 8932 PhiRecipe = new VPWidenPHIRecipe(Phi); 8933 } 8934 8935 return toVPRecipeResult(PhiRecipe); 8936 } 8937 8938 if (isa<TruncInst>(Instr) && 8939 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8940 Range, *Plan))) 8941 return toVPRecipeResult(Recipe); 8942 8943 if (!shouldWiden(Instr, Range)) 8944 return nullptr; 8945 8946 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8947 return toVPRecipeResult(new VPWidenGEPRecipe( 8948 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8949 8950 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8951 bool InvariantCond = 8952 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8953 return toVPRecipeResult(new VPWidenSelectRecipe( 8954 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8955 } 8956 8957 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8958 } 8959 8960 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8961 ElementCount MaxVF) { 8962 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8963 8964 // Collect instructions from the original loop that will become trivially dead 8965 // in the vectorized loop. We don't need to vectorize these instructions. For 8966 // example, original induction update instructions can become dead because we 8967 // separately emit induction "steps" when generating code for the new loop. 8968 // Similarly, we create a new latch condition when setting up the structure 8969 // of the new loop, so the old one can become dead. 8970 SmallPtrSet<Instruction *, 4> DeadInstructions; 8971 collectTriviallyDeadInstructions(DeadInstructions); 8972 8973 // Add assume instructions we need to drop to DeadInstructions, to prevent 8974 // them from being added to the VPlan. 8975 // TODO: We only need to drop assumes in blocks that get flattend. If the 8976 // control flow is preserved, we should keep them. 8977 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 8978 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 8979 8980 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8981 // Dead instructions do not need sinking. Remove them from SinkAfter. 8982 for (Instruction *I : DeadInstructions) 8983 SinkAfter.erase(I); 8984 8985 // Cannot sink instructions after dead instructions (there won't be any 8986 // recipes for them). Instead, find the first non-dead previous instruction. 8987 for (auto &P : Legal->getSinkAfter()) { 8988 Instruction *SinkTarget = P.second; 8989 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8990 (void)FirstInst; 8991 while (DeadInstructions.contains(SinkTarget)) { 8992 assert( 8993 SinkTarget != FirstInst && 8994 "Must find a live instruction (at least the one feeding the " 8995 "first-order recurrence PHI) before reaching beginning of the block"); 8996 SinkTarget = SinkTarget->getPrevNode(); 8997 assert(SinkTarget != P.first && 8998 "sink source equals target, no sinking required"); 8999 } 9000 P.second = SinkTarget; 9001 } 9002 9003 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 9004 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 9005 VFRange SubRange = {VF, MaxVFPlusOne}; 9006 VPlans.push_back( 9007 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 9008 VF = SubRange.End; 9009 } 9010 } 9011 9012 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 9013 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 9014 const MapVector<Instruction *, Instruction *> &SinkAfter) { 9015 9016 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 9017 9018 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 9019 9020 // --------------------------------------------------------------------------- 9021 // Pre-construction: record ingredients whose recipes we'll need to further 9022 // process after constructing the initial VPlan. 9023 // --------------------------------------------------------------------------- 9024 9025 // Mark instructions we'll need to sink later and their targets as 9026 // ingredients whose recipe we'll need to record. 9027 for (auto &Entry : SinkAfter) { 9028 RecipeBuilder.recordRecipeOf(Entry.first); 9029 RecipeBuilder.recordRecipeOf(Entry.second); 9030 } 9031 for (auto &Reduction : CM.getInLoopReductionChains()) { 9032 PHINode *Phi = Reduction.first; 9033 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind(); 9034 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9035 9036 RecipeBuilder.recordRecipeOf(Phi); 9037 for (auto &R : ReductionOperations) { 9038 RecipeBuilder.recordRecipeOf(R); 9039 // For min/max reducitons, where we have a pair of icmp/select, we also 9040 // need to record the ICmp recipe, so it can be removed later. 9041 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9042 "Only min/max recurrences allowed for inloop reductions"); 9043 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 9044 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 9045 } 9046 } 9047 9048 // For each interleave group which is relevant for this (possibly trimmed) 9049 // Range, add it to the set of groups to be later applied to the VPlan and add 9050 // placeholders for its members' Recipes which we'll be replacing with a 9051 // single VPInterleaveRecipe. 9052 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 9053 auto applyIG = [IG, this](ElementCount VF) -> bool { 9054 return (VF.isVector() && // Query is illegal for VF == 1 9055 CM.getWideningDecision(IG->getInsertPos(), VF) == 9056 LoopVectorizationCostModel::CM_Interleave); 9057 }; 9058 if (!getDecisionAndClampRange(applyIG, Range)) 9059 continue; 9060 InterleaveGroups.insert(IG); 9061 for (unsigned i = 0; i < IG->getFactor(); i++) 9062 if (Instruction *Member = IG->getMember(i)) 9063 RecipeBuilder.recordRecipeOf(Member); 9064 }; 9065 9066 // --------------------------------------------------------------------------- 9067 // Build initial VPlan: Scan the body of the loop in a topological order to 9068 // visit each basic block after having visited its predecessor basic blocks. 9069 // --------------------------------------------------------------------------- 9070 9071 auto Plan = std::make_unique<VPlan>(); 9072 9073 // Scan the body of the loop in a topological order to visit each basic block 9074 // after having visited its predecessor basic blocks. 9075 LoopBlocksDFS DFS(OrigLoop); 9076 DFS.perform(LI); 9077 9078 VPBasicBlock *VPBB = nullptr; 9079 VPBasicBlock *HeaderVPBB = nullptr; 9080 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 9081 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 9082 // Relevant instructions from basic block BB will be grouped into VPRecipe 9083 // ingredients and fill a new VPBasicBlock. 9084 unsigned VPBBsForBB = 0; 9085 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 9086 if (VPBB) 9087 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 9088 else { 9089 auto *TopRegion = new VPRegionBlock("vector loop"); 9090 TopRegion->setEntry(FirstVPBBForBB); 9091 Plan->setEntry(TopRegion); 9092 HeaderVPBB = FirstVPBBForBB; 9093 } 9094 VPBB = FirstVPBBForBB; 9095 Builder.setInsertPoint(VPBB); 9096 9097 // Introduce each ingredient into VPlan. 9098 // TODO: Model and preserve debug instrinsics in VPlan. 9099 for (Instruction &I : BB->instructionsWithoutDebug()) { 9100 Instruction *Instr = &I; 9101 9102 // First filter out irrelevant instructions, to ensure no recipes are 9103 // built for them. 9104 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9105 continue; 9106 9107 SmallVector<VPValue *, 4> Operands; 9108 auto *Phi = dyn_cast<PHINode>(Instr); 9109 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9110 Operands.push_back(Plan->getOrAddVPValue( 9111 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9112 } else { 9113 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9114 Operands = {OpRange.begin(), OpRange.end()}; 9115 } 9116 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9117 Instr, Operands, Range, Plan)) { 9118 // If Instr can be simplified to an existing VPValue, use it. 9119 if (RecipeOrValue.is<VPValue *>()) { 9120 auto *VPV = RecipeOrValue.get<VPValue *>(); 9121 Plan->addVPValue(Instr, VPV); 9122 // If the re-used value is a recipe, register the recipe for the 9123 // instruction, in case the recipe for Instr needs to be recorded. 9124 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9125 RecipeBuilder.setRecipe(Instr, R); 9126 continue; 9127 } 9128 // Otherwise, add the new recipe. 9129 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9130 for (auto *Def : Recipe->definedValues()) { 9131 auto *UV = Def->getUnderlyingValue(); 9132 Plan->addVPValue(UV, Def); 9133 } 9134 9135 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9136 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9137 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9138 // of the header block. That can happen for truncates of induction 9139 // variables. Those recipes are moved to the phi section of the header 9140 // block after applying SinkAfter, which relies on the original 9141 // position of the trunc. 9142 assert(isa<TruncInst>(Instr)); 9143 InductionsToMove.push_back( 9144 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9145 } 9146 RecipeBuilder.setRecipe(Instr, Recipe); 9147 VPBB->appendRecipe(Recipe); 9148 continue; 9149 } 9150 9151 // Otherwise, if all widening options failed, Instruction is to be 9152 // replicated. This may create a successor for VPBB. 9153 VPBasicBlock *NextVPBB = 9154 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9155 if (NextVPBB != VPBB) { 9156 VPBB = NextVPBB; 9157 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9158 : ""); 9159 } 9160 } 9161 } 9162 9163 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9164 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9165 "entry block must be set to a VPRegionBlock having a non-empty entry " 9166 "VPBasicBlock"); 9167 RecipeBuilder.fixHeaderPhis(); 9168 9169 // --------------------------------------------------------------------------- 9170 // Transform initial VPlan: Apply previously taken decisions, in order, to 9171 // bring the VPlan to its final state. 9172 // --------------------------------------------------------------------------- 9173 9174 // Apply Sink-After legal constraints. 9175 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9176 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9177 if (Region && Region->isReplicator()) { 9178 assert(Region->getNumSuccessors() == 1 && 9179 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9180 assert(R->getParent()->size() == 1 && 9181 "A recipe in an original replicator region must be the only " 9182 "recipe in its block"); 9183 return Region; 9184 } 9185 return nullptr; 9186 }; 9187 for (auto &Entry : SinkAfter) { 9188 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9189 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9190 9191 auto *TargetRegion = GetReplicateRegion(Target); 9192 auto *SinkRegion = GetReplicateRegion(Sink); 9193 if (!SinkRegion) { 9194 // If the sink source is not a replicate region, sink the recipe directly. 9195 if (TargetRegion) { 9196 // The target is in a replication region, make sure to move Sink to 9197 // the block after it, not into the replication region itself. 9198 VPBasicBlock *NextBlock = 9199 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9200 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9201 } else 9202 Sink->moveAfter(Target); 9203 continue; 9204 } 9205 9206 // The sink source is in a replicate region. Unhook the region from the CFG. 9207 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9208 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9209 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9210 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9211 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9212 9213 if (TargetRegion) { 9214 // The target recipe is also in a replicate region, move the sink region 9215 // after the target region. 9216 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9217 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9218 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9219 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9220 } else { 9221 // The sink source is in a replicate region, we need to move the whole 9222 // replicate region, which should only contain a single recipe in the 9223 // main block. 9224 auto *SplitBlock = 9225 Target->getParent()->splitAt(std::next(Target->getIterator())); 9226 9227 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9228 9229 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9230 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9231 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9232 if (VPBB == SplitPred) 9233 VPBB = SplitBlock; 9234 } 9235 } 9236 9237 cast<VPRegionBlock>(Plan->getEntry())->setExit(VPBB); 9238 9239 // Now that sink-after is done, move induction recipes for optimized truncates 9240 // to the phi section of the header block. 9241 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9242 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9243 9244 // Adjust the recipes for any inloop reductions. 9245 adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start); 9246 9247 // Introduce a recipe to combine the incoming and previous values of a 9248 // first-order recurrence. 9249 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9250 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9251 if (!RecurPhi) 9252 continue; 9253 9254 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9255 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9256 auto *Region = GetReplicateRegion(PrevRecipe); 9257 if (Region) 9258 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9259 if (Region || PrevRecipe->isPhi()) 9260 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9261 else 9262 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9263 9264 auto *RecurSplice = cast<VPInstruction>( 9265 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9266 {RecurPhi, RecurPhi->getBackedgeValue()})); 9267 9268 RecurPhi->replaceAllUsesWith(RecurSplice); 9269 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9270 // all users. 9271 RecurSplice->setOperand(0, RecurPhi); 9272 } 9273 9274 // Interleave memory: for each Interleave Group we marked earlier as relevant 9275 // for this VPlan, replace the Recipes widening its memory instructions with a 9276 // single VPInterleaveRecipe at its insertion point. 9277 for (auto IG : InterleaveGroups) { 9278 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9279 RecipeBuilder.getRecipe(IG->getInsertPos())); 9280 SmallVector<VPValue *, 4> StoredValues; 9281 for (unsigned i = 0; i < IG->getFactor(); ++i) 9282 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9283 auto *StoreR = 9284 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9285 StoredValues.push_back(StoreR->getStoredValue()); 9286 } 9287 9288 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9289 Recipe->getMask()); 9290 VPIG->insertBefore(Recipe); 9291 unsigned J = 0; 9292 for (unsigned i = 0; i < IG->getFactor(); ++i) 9293 if (Instruction *Member = IG->getMember(i)) { 9294 if (!Member->getType()->isVoidTy()) { 9295 VPValue *OriginalV = Plan->getVPValue(Member); 9296 Plan->removeVPValueFor(Member); 9297 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9298 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9299 J++; 9300 } 9301 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9302 } 9303 } 9304 9305 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9306 // in ways that accessing values using original IR values is incorrect. 9307 Plan->disableValue2VPValue(); 9308 9309 VPlanTransforms::sinkScalarOperands(*Plan); 9310 VPlanTransforms::mergeReplicateRegions(*Plan); 9311 9312 std::string PlanName; 9313 raw_string_ostream RSO(PlanName); 9314 ElementCount VF = Range.Start; 9315 Plan->addVF(VF); 9316 RSO << "Initial VPlan for VF={" << VF; 9317 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9318 Plan->addVF(VF); 9319 RSO << "," << VF; 9320 } 9321 RSO << "},UF>=1"; 9322 RSO.flush(); 9323 Plan->setName(PlanName); 9324 9325 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9326 return Plan; 9327 } 9328 9329 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9330 // Outer loop handling: They may require CFG and instruction level 9331 // transformations before even evaluating whether vectorization is profitable. 9332 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9333 // the vectorization pipeline. 9334 assert(!OrigLoop->isInnermost()); 9335 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9336 9337 // Create new empty VPlan 9338 auto Plan = std::make_unique<VPlan>(); 9339 9340 // Build hierarchical CFG 9341 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9342 HCFGBuilder.buildHierarchicalCFG(); 9343 9344 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9345 VF *= 2) 9346 Plan->addVF(VF); 9347 9348 if (EnableVPlanPredication) { 9349 VPlanPredicator VPP(*Plan); 9350 VPP.predicate(); 9351 9352 // Avoid running transformation to recipes until masked code generation in 9353 // VPlan-native path is in place. 9354 return Plan; 9355 } 9356 9357 SmallPtrSet<Instruction *, 1> DeadInstructions; 9358 VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan, 9359 Legal->getInductionVars(), 9360 DeadInstructions, *PSE.getSE()); 9361 return Plan; 9362 } 9363 9364 // Adjust the recipes for reductions. For in-loop reductions the chain of 9365 // instructions leading from the loop exit instr to the phi need to be converted 9366 // to reductions, with one operand being vector and the other being the scalar 9367 // reduction chain. For other reductions, a select is introduced between the phi 9368 // and live-out recipes when folding the tail. 9369 void LoopVectorizationPlanner::adjustRecipesForReductions( 9370 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, 9371 ElementCount MinVF) { 9372 for (auto &Reduction : CM.getInLoopReductionChains()) { 9373 PHINode *Phi = Reduction.first; 9374 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9375 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9376 9377 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) 9378 continue; 9379 9380 // ReductionOperations are orders top-down from the phi's use to the 9381 // LoopExitValue. We keep a track of the previous item (the Chain) to tell 9382 // which of the two operands will remain scalar and which will be reduced. 9383 // For minmax the chain will be the select instructions. 9384 Instruction *Chain = Phi; 9385 for (Instruction *R : ReductionOperations) { 9386 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9387 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9388 9389 VPValue *ChainOp = Plan->getVPValue(Chain); 9390 unsigned FirstOpId; 9391 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9392 "Only min/max recurrences allowed for inloop reductions"); 9393 // Recognize a call to the llvm.fmuladd intrinsic. 9394 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9395 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9396 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9397 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9398 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9399 "Expected to replace a VPWidenSelectSC"); 9400 FirstOpId = 1; 9401 } else { 9402 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9403 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9404 "Expected to replace a VPWidenSC"); 9405 FirstOpId = 0; 9406 } 9407 unsigned VecOpId = 9408 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9409 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9410 9411 auto *CondOp = CM.foldTailByMasking() 9412 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9413 : nullptr; 9414 9415 if (IsFMulAdd) { 9416 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9417 // need to create an fmul recipe to use as the vector operand for the 9418 // fadd reduction. 9419 VPInstruction *FMulRecipe = new VPInstruction( 9420 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9421 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9422 WidenRecipe->getParent()->insert(FMulRecipe, 9423 WidenRecipe->getIterator()); 9424 VecOp = FMulRecipe; 9425 } 9426 VPReductionRecipe *RedRecipe = 9427 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9428 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9429 Plan->removeVPValueFor(R); 9430 Plan->addVPValue(R, RedRecipe); 9431 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9432 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9433 WidenRecipe->eraseFromParent(); 9434 9435 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9436 VPRecipeBase *CompareRecipe = 9437 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9438 assert(isa<VPWidenRecipe>(CompareRecipe) && 9439 "Expected to replace a VPWidenSC"); 9440 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9441 "Expected no remaining users"); 9442 CompareRecipe->eraseFromParent(); 9443 } 9444 Chain = R; 9445 } 9446 } 9447 9448 // If tail is folded by masking, introduce selects between the phi 9449 // and the live-out instruction of each reduction, at the end of the latch. 9450 if (CM.foldTailByMasking()) { 9451 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9452 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9453 if (!PhiR || PhiR->isInLoop()) 9454 continue; 9455 Builder.setInsertPoint(LatchVPBB); 9456 VPValue *Cond = 9457 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9458 VPValue *Red = PhiR->getBackedgeValue(); 9459 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9460 } 9461 } 9462 } 9463 9464 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9465 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9466 VPSlotTracker &SlotTracker) const { 9467 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9468 IG->getInsertPos()->printAsOperand(O, false); 9469 O << ", "; 9470 getAddr()->printAsOperand(O, SlotTracker); 9471 VPValue *Mask = getMask(); 9472 if (Mask) { 9473 O << ", "; 9474 Mask->printAsOperand(O, SlotTracker); 9475 } 9476 9477 unsigned OpIdx = 0; 9478 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9479 if (!IG->getMember(i)) 9480 continue; 9481 if (getNumStoreOperands() > 0) { 9482 O << "\n" << Indent << " store "; 9483 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9484 O << " to index " << i; 9485 } else { 9486 O << "\n" << Indent << " "; 9487 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9488 O << " = load from index " << i; 9489 } 9490 ++OpIdx; 9491 } 9492 } 9493 #endif 9494 9495 void VPWidenCallRecipe::execute(VPTransformState &State) { 9496 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9497 *this, State); 9498 } 9499 9500 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9501 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9502 State.ILV->setDebugLocFromInst(&I); 9503 9504 // The condition can be loop invariant but still defined inside the 9505 // loop. This means that we can't just use the original 'cond' value. 9506 // We have to take the 'vectorized' value and pick the first lane. 9507 // Instcombine will make this a no-op. 9508 auto *InvarCond = 9509 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9510 9511 for (unsigned Part = 0; Part < State.UF; ++Part) { 9512 Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part); 9513 Value *Op0 = State.get(getOperand(1), Part); 9514 Value *Op1 = State.get(getOperand(2), Part); 9515 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9516 State.set(this, Sel, Part); 9517 State.ILV->addMetadata(Sel, &I); 9518 } 9519 } 9520 9521 void VPWidenRecipe::execute(VPTransformState &State) { 9522 auto &I = *cast<Instruction>(getUnderlyingValue()); 9523 auto &Builder = State.Builder; 9524 switch (I.getOpcode()) { 9525 case Instruction::Call: 9526 case Instruction::Br: 9527 case Instruction::PHI: 9528 case Instruction::GetElementPtr: 9529 case Instruction::Select: 9530 llvm_unreachable("This instruction is handled by a different recipe."); 9531 case Instruction::UDiv: 9532 case Instruction::SDiv: 9533 case Instruction::SRem: 9534 case Instruction::URem: 9535 case Instruction::Add: 9536 case Instruction::FAdd: 9537 case Instruction::Sub: 9538 case Instruction::FSub: 9539 case Instruction::FNeg: 9540 case Instruction::Mul: 9541 case Instruction::FMul: 9542 case Instruction::FDiv: 9543 case Instruction::FRem: 9544 case Instruction::Shl: 9545 case Instruction::LShr: 9546 case Instruction::AShr: 9547 case Instruction::And: 9548 case Instruction::Or: 9549 case Instruction::Xor: { 9550 // Just widen unops and binops. 9551 State.ILV->setDebugLocFromInst(&I); 9552 9553 for (unsigned Part = 0; Part < State.UF; ++Part) { 9554 SmallVector<Value *, 2> Ops; 9555 for (VPValue *VPOp : operands()) 9556 Ops.push_back(State.get(VPOp, Part)); 9557 9558 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9559 9560 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9561 VecOp->copyIRFlags(&I); 9562 9563 // If the instruction is vectorized and was in a basic block that needed 9564 // predication, we can't propagate poison-generating flags (nuw/nsw, 9565 // exact, etc.). The control flow has been linearized and the 9566 // instruction is no longer guarded by the predicate, which could make 9567 // the flag properties to no longer hold. 9568 if (State.MayGeneratePoisonRecipes.count(this) > 0) 9569 VecOp->dropPoisonGeneratingFlags(); 9570 } 9571 9572 // Use this vector value for all users of the original instruction. 9573 State.set(this, V, Part); 9574 State.ILV->addMetadata(V, &I); 9575 } 9576 9577 break; 9578 } 9579 case Instruction::ICmp: 9580 case Instruction::FCmp: { 9581 // Widen compares. Generate vector compares. 9582 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9583 auto *Cmp = cast<CmpInst>(&I); 9584 State.ILV->setDebugLocFromInst(Cmp); 9585 for (unsigned Part = 0; Part < State.UF; ++Part) { 9586 Value *A = State.get(getOperand(0), Part); 9587 Value *B = State.get(getOperand(1), Part); 9588 Value *C = nullptr; 9589 if (FCmp) { 9590 // Propagate fast math flags. 9591 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9592 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9593 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9594 } else { 9595 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9596 } 9597 State.set(this, C, Part); 9598 State.ILV->addMetadata(C, &I); 9599 } 9600 9601 break; 9602 } 9603 9604 case Instruction::ZExt: 9605 case Instruction::SExt: 9606 case Instruction::FPToUI: 9607 case Instruction::FPToSI: 9608 case Instruction::FPExt: 9609 case Instruction::PtrToInt: 9610 case Instruction::IntToPtr: 9611 case Instruction::SIToFP: 9612 case Instruction::UIToFP: 9613 case Instruction::Trunc: 9614 case Instruction::FPTrunc: 9615 case Instruction::BitCast: { 9616 auto *CI = cast<CastInst>(&I); 9617 State.ILV->setDebugLocFromInst(CI); 9618 9619 /// Vectorize casts. 9620 Type *DestTy = (State.VF.isScalar()) 9621 ? CI->getType() 9622 : VectorType::get(CI->getType(), State.VF); 9623 9624 for (unsigned Part = 0; Part < State.UF; ++Part) { 9625 Value *A = State.get(getOperand(0), Part); 9626 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9627 State.set(this, Cast, Part); 9628 State.ILV->addMetadata(Cast, &I); 9629 } 9630 break; 9631 } 9632 default: 9633 // This instruction is not vectorized by simple widening. 9634 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9635 llvm_unreachable("Unhandled instruction!"); 9636 } // end of switch. 9637 } 9638 9639 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9640 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9641 // Construct a vector GEP by widening the operands of the scalar GEP as 9642 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9643 // results in a vector of pointers when at least one operand of the GEP 9644 // is vector-typed. Thus, to keep the representation compact, we only use 9645 // vector-typed operands for loop-varying values. 9646 9647 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9648 // If we are vectorizing, but the GEP has only loop-invariant operands, 9649 // the GEP we build (by only using vector-typed operands for 9650 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9651 // produce a vector of pointers, we need to either arbitrarily pick an 9652 // operand to broadcast, or broadcast a clone of the original GEP. 9653 // Here, we broadcast a clone of the original. 9654 // 9655 // TODO: If at some point we decide to scalarize instructions having 9656 // loop-invariant operands, this special case will no longer be 9657 // required. We would add the scalarization decision to 9658 // collectLoopScalars() and teach getVectorValue() to broadcast 9659 // the lane-zero scalar value. 9660 auto *Clone = State.Builder.Insert(GEP->clone()); 9661 for (unsigned Part = 0; Part < State.UF; ++Part) { 9662 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9663 State.set(this, EntryPart, Part); 9664 State.ILV->addMetadata(EntryPart, GEP); 9665 } 9666 } else { 9667 // If the GEP has at least one loop-varying operand, we are sure to 9668 // produce a vector of pointers. But if we are only unrolling, we want 9669 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9670 // produce with the code below will be scalar (if VF == 1) or vector 9671 // (otherwise). Note that for the unroll-only case, we still maintain 9672 // values in the vector mapping with initVector, as we do for other 9673 // instructions. 9674 for (unsigned Part = 0; Part < State.UF; ++Part) { 9675 // The pointer operand of the new GEP. If it's loop-invariant, we 9676 // won't broadcast it. 9677 auto *Ptr = IsPtrLoopInvariant 9678 ? State.get(getOperand(0), VPIteration(0, 0)) 9679 : State.get(getOperand(0), Part); 9680 9681 // Collect all the indices for the new GEP. If any index is 9682 // loop-invariant, we won't broadcast it. 9683 SmallVector<Value *, 4> Indices; 9684 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9685 VPValue *Operand = getOperand(I); 9686 if (IsIndexLoopInvariant[I - 1]) 9687 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9688 else 9689 Indices.push_back(State.get(Operand, Part)); 9690 } 9691 9692 // If the GEP instruction is vectorized and was in a basic block that 9693 // needed predication, we can't propagate the poison-generating 'inbounds' 9694 // flag. The control flow has been linearized and the GEP is no longer 9695 // guarded by the predicate, which could make the 'inbounds' properties to 9696 // no longer hold. 9697 bool IsInBounds = 9698 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9699 9700 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9701 // but it should be a vector, otherwise. 9702 auto *NewGEP = IsInBounds 9703 ? State.Builder.CreateInBoundsGEP( 9704 GEP->getSourceElementType(), Ptr, Indices) 9705 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9706 Ptr, Indices); 9707 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9708 "NewGEP is not a pointer vector"); 9709 State.set(this, NewGEP, Part); 9710 State.ILV->addMetadata(NewGEP, GEP); 9711 } 9712 } 9713 } 9714 9715 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9716 assert(!State.Instance && "Int or FP induction being replicated."); 9717 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9718 getTruncInst(), getVPValue(0), 9719 getCastValue(), State); 9720 } 9721 9722 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9723 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9724 State); 9725 } 9726 9727 void VPBlendRecipe::execute(VPTransformState &State) { 9728 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9729 // We know that all PHIs in non-header blocks are converted into 9730 // selects, so we don't have to worry about the insertion order and we 9731 // can just use the builder. 9732 // At this point we generate the predication tree. There may be 9733 // duplications since this is a simple recursive scan, but future 9734 // optimizations will clean it up. 9735 9736 unsigned NumIncoming = getNumIncomingValues(); 9737 9738 // Generate a sequence of selects of the form: 9739 // SELECT(Mask3, In3, 9740 // SELECT(Mask2, In2, 9741 // SELECT(Mask1, In1, 9742 // In0))) 9743 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9744 // are essentially undef are taken from In0. 9745 InnerLoopVectorizer::VectorParts Entry(State.UF); 9746 for (unsigned In = 0; In < NumIncoming; ++In) { 9747 for (unsigned Part = 0; Part < State.UF; ++Part) { 9748 // We might have single edge PHIs (blocks) - use an identity 9749 // 'select' for the first PHI operand. 9750 Value *In0 = State.get(getIncomingValue(In), Part); 9751 if (In == 0) 9752 Entry[Part] = In0; // Initialize with the first incoming value. 9753 else { 9754 // Select between the current value and the previous incoming edge 9755 // based on the incoming mask. 9756 Value *Cond = State.get(getMask(In), Part); 9757 Entry[Part] = 9758 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9759 } 9760 } 9761 } 9762 for (unsigned Part = 0; Part < State.UF; ++Part) 9763 State.set(this, Entry[Part], Part); 9764 } 9765 9766 void VPInterleaveRecipe::execute(VPTransformState &State) { 9767 assert(!State.Instance && "Interleave group being replicated."); 9768 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9769 getStoredValues(), getMask()); 9770 } 9771 9772 void VPReductionRecipe::execute(VPTransformState &State) { 9773 assert(!State.Instance && "Reduction being replicated."); 9774 Value *PrevInChain = State.get(getChainOp(), 0); 9775 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9776 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9777 // Propagate the fast-math flags carried by the underlying instruction. 9778 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9779 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9780 for (unsigned Part = 0; Part < State.UF; ++Part) { 9781 Value *NewVecOp = State.get(getVecOp(), Part); 9782 if (VPValue *Cond = getCondOp()) { 9783 Value *NewCond = State.get(Cond, Part); 9784 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9785 Value *Iden = RdxDesc->getRecurrenceIdentity( 9786 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9787 Value *IdenVec = 9788 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9789 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9790 NewVecOp = Select; 9791 } 9792 Value *NewRed; 9793 Value *NextInChain; 9794 if (IsOrdered) { 9795 if (State.VF.isVector()) 9796 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9797 PrevInChain); 9798 else 9799 NewRed = State.Builder.CreateBinOp( 9800 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9801 NewVecOp); 9802 PrevInChain = NewRed; 9803 } else { 9804 PrevInChain = State.get(getChainOp(), Part); 9805 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9806 } 9807 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9808 NextInChain = 9809 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9810 NewRed, PrevInChain); 9811 } else if (IsOrdered) 9812 NextInChain = NewRed; 9813 else 9814 NextInChain = State.Builder.CreateBinOp( 9815 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9816 PrevInChain); 9817 State.set(this, NextInChain, Part); 9818 } 9819 } 9820 9821 void VPReplicateRecipe::execute(VPTransformState &State) { 9822 if (State.Instance) { // Generate a single instance. 9823 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9824 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9825 IsPredicated, State); 9826 // Insert scalar instance packing it into a vector. 9827 if (AlsoPack && State.VF.isVector()) { 9828 // If we're constructing lane 0, initialize to start from poison. 9829 if (State.Instance->Lane.isFirstLane()) { 9830 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 9831 Value *Poison = PoisonValue::get( 9832 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9833 State.set(this, Poison, State.Instance->Part); 9834 } 9835 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9836 } 9837 return; 9838 } 9839 9840 // Generate scalar instances for all VF lanes of all UF parts, unless the 9841 // instruction is uniform inwhich case generate only the first lane for each 9842 // of the UF parts. 9843 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); 9844 assert((!State.VF.isScalable() || IsUniform) && 9845 "Can't scalarize a scalable vector"); 9846 for (unsigned Part = 0; Part < State.UF; ++Part) 9847 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9848 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9849 VPIteration(Part, Lane), IsPredicated, 9850 State); 9851 } 9852 9853 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9854 assert(State.Instance && "Branch on Mask works only on single instance."); 9855 9856 unsigned Part = State.Instance->Part; 9857 unsigned Lane = State.Instance->Lane.getKnownLane(); 9858 9859 Value *ConditionBit = nullptr; 9860 VPValue *BlockInMask = getMask(); 9861 if (BlockInMask) { 9862 ConditionBit = State.get(BlockInMask, Part); 9863 if (ConditionBit->getType()->isVectorTy()) 9864 ConditionBit = State.Builder.CreateExtractElement( 9865 ConditionBit, State.Builder.getInt32(Lane)); 9866 } else // Block in mask is all-one. 9867 ConditionBit = State.Builder.getTrue(); 9868 9869 // Replace the temporary unreachable terminator with a new conditional branch, 9870 // whose two destinations will be set later when they are created. 9871 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9872 assert(isa<UnreachableInst>(CurrentTerminator) && 9873 "Expected to replace unreachable terminator with conditional branch."); 9874 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9875 CondBr->setSuccessor(0, nullptr); 9876 ReplaceInstWithInst(CurrentTerminator, CondBr); 9877 } 9878 9879 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9880 assert(State.Instance && "Predicated instruction PHI works per instance."); 9881 Instruction *ScalarPredInst = 9882 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9883 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9884 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9885 assert(PredicatingBB && "Predicated block has no single predecessor."); 9886 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9887 "operand must be VPReplicateRecipe"); 9888 9889 // By current pack/unpack logic we need to generate only a single phi node: if 9890 // a vector value for the predicated instruction exists at this point it means 9891 // the instruction has vector users only, and a phi for the vector value is 9892 // needed. In this case the recipe of the predicated instruction is marked to 9893 // also do that packing, thereby "hoisting" the insert-element sequence. 9894 // Otherwise, a phi node for the scalar value is needed. 9895 unsigned Part = State.Instance->Part; 9896 if (State.hasVectorValue(getOperand(0), Part)) { 9897 Value *VectorValue = State.get(getOperand(0), Part); 9898 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9899 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9900 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9901 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9902 if (State.hasVectorValue(this, Part)) 9903 State.reset(this, VPhi, Part); 9904 else 9905 State.set(this, VPhi, Part); 9906 // NOTE: Currently we need to update the value of the operand, so the next 9907 // predicated iteration inserts its generated value in the correct vector. 9908 State.reset(getOperand(0), VPhi, Part); 9909 } else { 9910 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9911 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9912 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9913 PredicatingBB); 9914 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9915 if (State.hasScalarValue(this, *State.Instance)) 9916 State.reset(this, Phi, *State.Instance); 9917 else 9918 State.set(this, Phi, *State.Instance); 9919 // NOTE: Currently we need to update the value of the operand, so the next 9920 // predicated iteration inserts its generated value in the correct vector. 9921 State.reset(getOperand(0), Phi, *State.Instance); 9922 } 9923 } 9924 9925 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9926 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9927 9928 // Attempt to issue a wide load. 9929 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9930 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9931 9932 assert((LI || SI) && "Invalid Load/Store instruction"); 9933 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9934 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9935 9936 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9937 9938 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9939 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9940 bool CreateGatherScatter = !Consecutive; 9941 9942 auto &Builder = State.Builder; 9943 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9944 bool isMaskRequired = getMask(); 9945 if (isMaskRequired) 9946 for (unsigned Part = 0; Part < State.UF; ++Part) 9947 BlockInMaskParts[Part] = State.get(getMask(), Part); 9948 9949 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9950 // Calculate the pointer for the specific unroll-part. 9951 GetElementPtrInst *PartPtr = nullptr; 9952 9953 bool InBounds = false; 9954 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9955 InBounds = gep->isInBounds(); 9956 if (Reverse) { 9957 // If the address is consecutive but reversed, then the 9958 // wide store needs to start at the last vector element. 9959 // RunTimeVF = VScale * VF.getKnownMinValue() 9960 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9961 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9962 // NumElt = -Part * RunTimeVF 9963 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9964 // LastLane = 1 - RunTimeVF 9965 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9966 PartPtr = 9967 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9968 PartPtr->setIsInBounds(InBounds); 9969 PartPtr = cast<GetElementPtrInst>( 9970 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9971 PartPtr->setIsInBounds(InBounds); 9972 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 9973 BlockInMaskParts[Part] = 9974 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9975 } else { 9976 Value *Increment = 9977 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9978 PartPtr = cast<GetElementPtrInst>( 9979 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9980 PartPtr->setIsInBounds(InBounds); 9981 } 9982 9983 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9984 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9985 }; 9986 9987 // Handle Stores: 9988 if (SI) { 9989 State.ILV->setDebugLocFromInst(SI); 9990 9991 for (unsigned Part = 0; Part < State.UF; ++Part) { 9992 Instruction *NewSI = nullptr; 9993 Value *StoredVal = State.get(StoredValue, Part); 9994 if (CreateGatherScatter) { 9995 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9996 Value *VectorGep = State.get(getAddr(), Part); 9997 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9998 MaskPart); 9999 } else { 10000 if (Reverse) { 10001 // If we store to reverse consecutive memory locations, then we need 10002 // to reverse the order of elements in the stored value. 10003 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 10004 // We don't want to update the value in the map as it might be used in 10005 // another expression. So don't call resetVectorValue(StoredVal). 10006 } 10007 auto *VecPtr = 10008 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10009 if (isMaskRequired) 10010 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10011 BlockInMaskParts[Part]); 10012 else 10013 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10014 } 10015 State.ILV->addMetadata(NewSI, SI); 10016 } 10017 return; 10018 } 10019 10020 // Handle loads. 10021 assert(LI && "Must have a load instruction"); 10022 State.ILV->setDebugLocFromInst(LI); 10023 for (unsigned Part = 0; Part < State.UF; ++Part) { 10024 Value *NewLI; 10025 if (CreateGatherScatter) { 10026 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10027 Value *VectorGep = State.get(getAddr(), Part); 10028 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10029 nullptr, "wide.masked.gather"); 10030 State.ILV->addMetadata(NewLI, LI); 10031 } else { 10032 auto *VecPtr = 10033 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10034 if (isMaskRequired) 10035 NewLI = Builder.CreateMaskedLoad( 10036 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10037 PoisonValue::get(DataTy), "wide.masked.load"); 10038 else 10039 NewLI = 10040 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10041 10042 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10043 State.ILV->addMetadata(NewLI, LI); 10044 if (Reverse) 10045 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10046 } 10047 10048 State.set(getVPSingleValue(), NewLI, Part); 10049 } 10050 } 10051 10052 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10053 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10054 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10055 // for predication. 10056 static ScalarEpilogueLowering getScalarEpilogueLowering( 10057 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10058 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10059 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10060 LoopVectorizationLegality &LVL) { 10061 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10062 // don't look at hints or options, and don't request a scalar epilogue. 10063 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10064 // LoopAccessInfo (due to code dependency and not being able to reliably get 10065 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10066 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10067 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10068 // back to the old way and vectorize with versioning when forced. See D81345.) 10069 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10070 PGSOQueryType::IRPass) && 10071 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10072 return CM_ScalarEpilogueNotAllowedOptSize; 10073 10074 // 2) If set, obey the directives 10075 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10076 switch (PreferPredicateOverEpilogue) { 10077 case PreferPredicateTy::ScalarEpilogue: 10078 return CM_ScalarEpilogueAllowed; 10079 case PreferPredicateTy::PredicateElseScalarEpilogue: 10080 return CM_ScalarEpilogueNotNeededUsePredicate; 10081 case PreferPredicateTy::PredicateOrDontVectorize: 10082 return CM_ScalarEpilogueNotAllowedUsePredicate; 10083 }; 10084 } 10085 10086 // 3) If set, obey the hints 10087 switch (Hints.getPredicate()) { 10088 case LoopVectorizeHints::FK_Enabled: 10089 return CM_ScalarEpilogueNotNeededUsePredicate; 10090 case LoopVectorizeHints::FK_Disabled: 10091 return CM_ScalarEpilogueAllowed; 10092 }; 10093 10094 // 4) if the TTI hook indicates this is profitable, request predication. 10095 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10096 LVL.getLAI())) 10097 return CM_ScalarEpilogueNotNeededUsePredicate; 10098 10099 return CM_ScalarEpilogueAllowed; 10100 } 10101 10102 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10103 // If Values have been set for this Def return the one relevant for \p Part. 10104 if (hasVectorValue(Def, Part)) 10105 return Data.PerPartOutput[Def][Part]; 10106 10107 if (!hasScalarValue(Def, {Part, 0})) { 10108 Value *IRV = Def->getLiveInIRValue(); 10109 Value *B = ILV->getBroadcastInstrs(IRV); 10110 set(Def, B, Part); 10111 return B; 10112 } 10113 10114 Value *ScalarValue = get(Def, {Part, 0}); 10115 // If we aren't vectorizing, we can just copy the scalar map values over 10116 // to the vector map. 10117 if (VF.isScalar()) { 10118 set(Def, ScalarValue, Part); 10119 return ScalarValue; 10120 } 10121 10122 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10123 bool IsUniform = RepR && RepR->isUniform(); 10124 10125 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10126 // Check if there is a scalar value for the selected lane. 10127 if (!hasScalarValue(Def, {Part, LastLane})) { 10128 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 10129 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 10130 "unexpected recipe found to be invariant"); 10131 IsUniform = true; 10132 LastLane = 0; 10133 } 10134 10135 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10136 // Set the insert point after the last scalarized instruction or after the 10137 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10138 // will directly follow the scalar definitions. 10139 auto OldIP = Builder.saveIP(); 10140 auto NewIP = 10141 isa<PHINode>(LastInst) 10142 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10143 : std::next(BasicBlock::iterator(LastInst)); 10144 Builder.SetInsertPoint(&*NewIP); 10145 10146 // However, if we are vectorizing, we need to construct the vector values. 10147 // If the value is known to be uniform after vectorization, we can just 10148 // broadcast the scalar value corresponding to lane zero for each unroll 10149 // iteration. Otherwise, we construct the vector values using 10150 // insertelement instructions. Since the resulting vectors are stored in 10151 // State, we will only generate the insertelements once. 10152 Value *VectorValue = nullptr; 10153 if (IsUniform) { 10154 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10155 set(Def, VectorValue, Part); 10156 } else { 10157 // Initialize packing with insertelements to start from undef. 10158 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10159 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10160 set(Def, Undef, Part); 10161 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10162 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10163 VectorValue = get(Def, Part); 10164 } 10165 Builder.restoreIP(OldIP); 10166 return VectorValue; 10167 } 10168 10169 // Process the loop in the VPlan-native vectorization path. This path builds 10170 // VPlan upfront in the vectorization pipeline, which allows to apply 10171 // VPlan-to-VPlan transformations from the very beginning without modifying the 10172 // input LLVM IR. 10173 static bool processLoopInVPlanNativePath( 10174 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10175 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10176 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10177 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10178 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10179 LoopVectorizationRequirements &Requirements) { 10180 10181 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10182 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10183 return false; 10184 } 10185 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10186 Function *F = L->getHeader()->getParent(); 10187 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10188 10189 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10190 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10191 10192 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10193 &Hints, IAI); 10194 // Use the planner for outer loop vectorization. 10195 // TODO: CM is not used at this point inside the planner. Turn CM into an 10196 // optional argument if we don't need it in the future. 10197 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10198 Requirements, ORE); 10199 10200 // Get user vectorization factor. 10201 ElementCount UserVF = Hints.getWidth(); 10202 10203 CM.collectElementTypesForWidening(); 10204 10205 // Plan how to best vectorize, return the best VF and its cost. 10206 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10207 10208 // If we are stress testing VPlan builds, do not attempt to generate vector 10209 // code. Masked vector code generation support will follow soon. 10210 // Also, do not attempt to vectorize if no vector code will be produced. 10211 if (VPlanBuildStressTest || EnableVPlanPredication || 10212 VectorizationFactor::Disabled() == VF) 10213 return false; 10214 10215 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10216 10217 { 10218 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10219 F->getParent()->getDataLayout()); 10220 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10221 &CM, BFI, PSI, Checks); 10222 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10223 << L->getHeader()->getParent()->getName() << "\"\n"); 10224 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10225 } 10226 10227 // Mark the loop as already vectorized to avoid vectorizing again. 10228 Hints.setAlreadyVectorized(); 10229 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10230 return true; 10231 } 10232 10233 // Emit a remark if there are stores to floats that required a floating point 10234 // extension. If the vectorized loop was generated with floating point there 10235 // will be a performance penalty from the conversion overhead and the change in 10236 // the vector width. 10237 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10238 SmallVector<Instruction *, 4> Worklist; 10239 for (BasicBlock *BB : L->getBlocks()) { 10240 for (Instruction &Inst : *BB) { 10241 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10242 if (S->getValueOperand()->getType()->isFloatTy()) 10243 Worklist.push_back(S); 10244 } 10245 } 10246 } 10247 10248 // Traverse the floating point stores upwards searching, for floating point 10249 // conversions. 10250 SmallPtrSet<const Instruction *, 4> Visited; 10251 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10252 while (!Worklist.empty()) { 10253 auto *I = Worklist.pop_back_val(); 10254 if (!L->contains(I)) 10255 continue; 10256 if (!Visited.insert(I).second) 10257 continue; 10258 10259 // Emit a remark if the floating point store required a floating 10260 // point conversion. 10261 // TODO: More work could be done to identify the root cause such as a 10262 // constant or a function return type and point the user to it. 10263 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10264 ORE->emit([&]() { 10265 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10266 I->getDebugLoc(), L->getHeader()) 10267 << "floating point conversion changes vector width. " 10268 << "Mixed floating point precision requires an up/down " 10269 << "cast that will negatively impact performance."; 10270 }); 10271 10272 for (Use &Op : I->operands()) 10273 if (auto *OpI = dyn_cast<Instruction>(Op)) 10274 Worklist.push_back(OpI); 10275 } 10276 } 10277 10278 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10279 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10280 !EnableLoopInterleaving), 10281 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10282 !EnableLoopVectorization) {} 10283 10284 bool LoopVectorizePass::processLoop(Loop *L) { 10285 assert((EnableVPlanNativePath || L->isInnermost()) && 10286 "VPlan-native path is not enabled. Only process inner loops."); 10287 10288 #ifndef NDEBUG 10289 const std::string DebugLocStr = getDebugLocString(L); 10290 #endif /* NDEBUG */ 10291 10292 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10293 << L->getHeader()->getParent()->getName() << "\" from " 10294 << DebugLocStr << "\n"); 10295 10296 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 10297 10298 LLVM_DEBUG( 10299 dbgs() << "LV: Loop hints:" 10300 << " force=" 10301 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10302 ? "disabled" 10303 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10304 ? "enabled" 10305 : "?")) 10306 << " width=" << Hints.getWidth() 10307 << " interleave=" << Hints.getInterleave() << "\n"); 10308 10309 // Function containing loop 10310 Function *F = L->getHeader()->getParent(); 10311 10312 // Looking at the diagnostic output is the only way to determine if a loop 10313 // was vectorized (other than looking at the IR or machine code), so it 10314 // is important to generate an optimization remark for each loop. Most of 10315 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10316 // generated as OptimizationRemark and OptimizationRemarkMissed are 10317 // less verbose reporting vectorized loops and unvectorized loops that may 10318 // benefit from vectorization, respectively. 10319 10320 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10321 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10322 return false; 10323 } 10324 10325 PredicatedScalarEvolution PSE(*SE, *L); 10326 10327 // Check if it is legal to vectorize the loop. 10328 LoopVectorizationRequirements Requirements; 10329 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10330 &Requirements, &Hints, DB, AC, BFI, PSI); 10331 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10332 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10333 Hints.emitRemarkWithHints(); 10334 return false; 10335 } 10336 10337 // Check the function attributes and profiles to find out if this function 10338 // should be optimized for size. 10339 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10340 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10341 10342 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10343 // here. They may require CFG and instruction level transformations before 10344 // even evaluating whether vectorization is profitable. Since we cannot modify 10345 // the incoming IR, we need to build VPlan upfront in the vectorization 10346 // pipeline. 10347 if (!L->isInnermost()) 10348 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10349 ORE, BFI, PSI, Hints, Requirements); 10350 10351 assert(L->isInnermost() && "Inner loop expected."); 10352 10353 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10354 // count by optimizing for size, to minimize overheads. 10355 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10356 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10357 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10358 << "This loop is worth vectorizing only if no scalar " 10359 << "iteration overheads are incurred."); 10360 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10361 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10362 else { 10363 LLVM_DEBUG(dbgs() << "\n"); 10364 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10365 } 10366 } 10367 10368 // Check the function attributes to see if implicit floats are allowed. 10369 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10370 // an integer loop and the vector instructions selected are purely integer 10371 // vector instructions? 10372 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10373 reportVectorizationFailure( 10374 "Can't vectorize when the NoImplicitFloat attribute is used", 10375 "loop not vectorized due to NoImplicitFloat attribute", 10376 "NoImplicitFloat", ORE, L); 10377 Hints.emitRemarkWithHints(); 10378 return false; 10379 } 10380 10381 // Check if the target supports potentially unsafe FP vectorization. 10382 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10383 // for the target we're vectorizing for, to make sure none of the 10384 // additional fp-math flags can help. 10385 if (Hints.isPotentiallyUnsafe() && 10386 TTI->isFPVectorizationPotentiallyUnsafe()) { 10387 reportVectorizationFailure( 10388 "Potentially unsafe FP op prevents vectorization", 10389 "loop not vectorized due to unsafe FP support.", 10390 "UnsafeFP", ORE, L); 10391 Hints.emitRemarkWithHints(); 10392 return false; 10393 } 10394 10395 bool AllowOrderedReductions; 10396 // If the flag is set, use that instead and override the TTI behaviour. 10397 if (ForceOrderedReductions.getNumOccurrences() > 0) 10398 AllowOrderedReductions = ForceOrderedReductions; 10399 else 10400 AllowOrderedReductions = TTI->enableOrderedReductions(); 10401 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10402 ORE->emit([&]() { 10403 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10404 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10405 ExactFPMathInst->getDebugLoc(), 10406 ExactFPMathInst->getParent()) 10407 << "loop not vectorized: cannot prove it is safe to reorder " 10408 "floating-point operations"; 10409 }); 10410 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10411 "reorder floating-point operations\n"); 10412 Hints.emitRemarkWithHints(); 10413 return false; 10414 } 10415 10416 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10417 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10418 10419 // If an override option has been passed in for interleaved accesses, use it. 10420 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10421 UseInterleaved = EnableInterleavedMemAccesses; 10422 10423 // Analyze interleaved memory accesses. 10424 if (UseInterleaved) { 10425 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10426 } 10427 10428 // Use the cost model. 10429 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10430 F, &Hints, IAI); 10431 CM.collectValuesToIgnore(); 10432 CM.collectElementTypesForWidening(); 10433 10434 // Use the planner for vectorization. 10435 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10436 Requirements, ORE); 10437 10438 // Get user vectorization factor and interleave count. 10439 ElementCount UserVF = Hints.getWidth(); 10440 unsigned UserIC = Hints.getInterleave(); 10441 10442 // Plan how to best vectorize, return the best VF and its cost. 10443 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10444 10445 VectorizationFactor VF = VectorizationFactor::Disabled(); 10446 unsigned IC = 1; 10447 10448 if (MaybeVF) { 10449 VF = *MaybeVF; 10450 // Select the interleave count. 10451 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10452 } 10453 10454 // Identify the diagnostic messages that should be produced. 10455 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10456 bool VectorizeLoop = true, InterleaveLoop = true; 10457 if (VF.Width.isScalar()) { 10458 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10459 VecDiagMsg = std::make_pair( 10460 "VectorizationNotBeneficial", 10461 "the cost-model indicates that vectorization is not beneficial"); 10462 VectorizeLoop = false; 10463 } 10464 10465 if (!MaybeVF && UserIC > 1) { 10466 // Tell the user interleaving was avoided up-front, despite being explicitly 10467 // requested. 10468 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10469 "interleaving should be avoided up front\n"); 10470 IntDiagMsg = std::make_pair( 10471 "InterleavingAvoided", 10472 "Ignoring UserIC, because interleaving was avoided up front"); 10473 InterleaveLoop = false; 10474 } else if (IC == 1 && UserIC <= 1) { 10475 // Tell the user interleaving is not beneficial. 10476 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 10477 IntDiagMsg = std::make_pair( 10478 "InterleavingNotBeneficial", 10479 "the cost-model indicates that interleaving is not beneficial"); 10480 InterleaveLoop = false; 10481 if (UserIC == 1) { 10482 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 10483 IntDiagMsg.second += 10484 " and is explicitly disabled or interleave count is set to 1"; 10485 } 10486 } else if (IC > 1 && UserIC == 1) { 10487 // Tell the user interleaving is beneficial, but it explicitly disabled. 10488 LLVM_DEBUG( 10489 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 10490 IntDiagMsg = std::make_pair( 10491 "InterleavingBeneficialButDisabled", 10492 "the cost-model indicates that interleaving is beneficial " 10493 "but is explicitly disabled or interleave count is set to 1"); 10494 InterleaveLoop = false; 10495 } 10496 10497 // Override IC if user provided an interleave count. 10498 IC = UserIC > 0 ? UserIC : IC; 10499 10500 // Emit diagnostic messages, if any. 10501 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 10502 if (!VectorizeLoop && !InterleaveLoop) { 10503 // Do not vectorize or interleaving the loop. 10504 ORE->emit([&]() { 10505 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10506 L->getStartLoc(), L->getHeader()) 10507 << VecDiagMsg.second; 10508 }); 10509 ORE->emit([&]() { 10510 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10511 L->getStartLoc(), L->getHeader()) 10512 << IntDiagMsg.second; 10513 }); 10514 return false; 10515 } else if (!VectorizeLoop && InterleaveLoop) { 10516 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10517 ORE->emit([&]() { 10518 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10519 L->getStartLoc(), L->getHeader()) 10520 << VecDiagMsg.second; 10521 }); 10522 } else if (VectorizeLoop && !InterleaveLoop) { 10523 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10524 << ") in " << DebugLocStr << '\n'); 10525 ORE->emit([&]() { 10526 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10527 L->getStartLoc(), L->getHeader()) 10528 << IntDiagMsg.second; 10529 }); 10530 } else if (VectorizeLoop && InterleaveLoop) { 10531 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10532 << ") in " << DebugLocStr << '\n'); 10533 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10534 } 10535 10536 bool DisableRuntimeUnroll = false; 10537 MDNode *OrigLoopID = L->getLoopID(); 10538 { 10539 // Optimistically generate runtime checks. Drop them if they turn out to not 10540 // be profitable. Limit the scope of Checks, so the cleanup happens 10541 // immediately after vector codegeneration is done. 10542 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10543 F->getParent()->getDataLayout()); 10544 if (!VF.Width.isScalar() || IC > 1) 10545 Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); 10546 10547 using namespace ore; 10548 if (!VectorizeLoop) { 10549 assert(IC > 1 && "interleave count should not be 1 or 0"); 10550 // If we decided that it is not legal to vectorize the loop, then 10551 // interleave it. 10552 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10553 &CM, BFI, PSI, Checks); 10554 10555 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10556 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10557 10558 ORE->emit([&]() { 10559 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10560 L->getHeader()) 10561 << "interleaved loop (interleaved count: " 10562 << NV("InterleaveCount", IC) << ")"; 10563 }); 10564 } else { 10565 // If we decided that it is *legal* to vectorize the loop, then do it. 10566 10567 // Consider vectorizing the epilogue too if it's profitable. 10568 VectorizationFactor EpilogueVF = 10569 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10570 if (EpilogueVF.Width.isVector()) { 10571 10572 // The first pass vectorizes the main loop and creates a scalar epilogue 10573 // to be vectorized by executing the plan (potentially with a different 10574 // factor) again shortly afterwards. 10575 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10576 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10577 EPI, &LVL, &CM, BFI, PSI, Checks); 10578 10579 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10580 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10581 DT); 10582 ++LoopsVectorized; 10583 10584 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10585 formLCSSARecursively(*L, *DT, LI, SE); 10586 10587 // Second pass vectorizes the epilogue and adjusts the control flow 10588 // edges from the first pass. 10589 EPI.MainLoopVF = EPI.EpilogueVF; 10590 EPI.MainLoopUF = EPI.EpilogueUF; 10591 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10592 ORE, EPI, &LVL, &CM, BFI, PSI, 10593 Checks); 10594 10595 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10596 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10597 DT); 10598 ++LoopsEpilogueVectorized; 10599 10600 if (!MainILV.areSafetyChecksAdded()) 10601 DisableRuntimeUnroll = true; 10602 } else { 10603 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10604 &LVL, &CM, BFI, PSI, Checks); 10605 10606 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10607 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10608 ++LoopsVectorized; 10609 10610 // Add metadata to disable runtime unrolling a scalar loop when there 10611 // are no runtime checks about strides and memory. A scalar loop that is 10612 // rarely used is not worth unrolling. 10613 if (!LB.areSafetyChecksAdded()) 10614 DisableRuntimeUnroll = true; 10615 } 10616 // Report the vectorization decision. 10617 ORE->emit([&]() { 10618 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10619 L->getHeader()) 10620 << "vectorized loop (vectorization width: " 10621 << NV("VectorizationFactor", VF.Width) 10622 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10623 }); 10624 } 10625 10626 if (ORE->allowExtraAnalysis(LV_NAME)) 10627 checkMixedPrecision(L, ORE); 10628 } 10629 10630 Optional<MDNode *> RemainderLoopID = 10631 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10632 LLVMLoopVectorizeFollowupEpilogue}); 10633 if (RemainderLoopID.hasValue()) { 10634 L->setLoopID(RemainderLoopID.getValue()); 10635 } else { 10636 if (DisableRuntimeUnroll) 10637 AddRuntimeUnrollDisableMetaData(L); 10638 10639 // Mark the loop as already vectorized to avoid vectorizing again. 10640 Hints.setAlreadyVectorized(); 10641 } 10642 10643 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10644 return true; 10645 } 10646 10647 LoopVectorizeResult LoopVectorizePass::runImpl( 10648 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10649 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10650 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10651 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10652 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10653 SE = &SE_; 10654 LI = &LI_; 10655 TTI = &TTI_; 10656 DT = &DT_; 10657 BFI = &BFI_; 10658 TLI = TLI_; 10659 AA = &AA_; 10660 AC = &AC_; 10661 GetLAA = &GetLAA_; 10662 DB = &DB_; 10663 ORE = &ORE_; 10664 PSI = PSI_; 10665 10666 // Don't attempt if 10667 // 1. the target claims to have no vector registers, and 10668 // 2. interleaving won't help ILP. 10669 // 10670 // The second condition is necessary because, even if the target has no 10671 // vector registers, loop vectorization may still enable scalar 10672 // interleaving. 10673 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10674 TTI->getMaxInterleaveFactor(1) < 2) 10675 return LoopVectorizeResult(false, false); 10676 10677 bool Changed = false, CFGChanged = false; 10678 10679 // The vectorizer requires loops to be in simplified form. 10680 // Since simplification may add new inner loops, it has to run before the 10681 // legality and profitability checks. This means running the loop vectorizer 10682 // will simplify all loops, regardless of whether anything end up being 10683 // vectorized. 10684 for (auto &L : *LI) 10685 Changed |= CFGChanged |= 10686 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10687 10688 // Build up a worklist of inner-loops to vectorize. This is necessary as 10689 // the act of vectorizing or partially unrolling a loop creates new loops 10690 // and can invalidate iterators across the loops. 10691 SmallVector<Loop *, 8> Worklist; 10692 10693 for (Loop *L : *LI) 10694 collectSupportedLoops(*L, LI, ORE, Worklist); 10695 10696 LoopsAnalyzed += Worklist.size(); 10697 10698 // Now walk the identified inner loops. 10699 while (!Worklist.empty()) { 10700 Loop *L = Worklist.pop_back_val(); 10701 10702 // For the inner loops we actually process, form LCSSA to simplify the 10703 // transform. 10704 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10705 10706 Changed |= CFGChanged |= processLoop(L); 10707 } 10708 10709 // Process each loop nest in the function. 10710 return LoopVectorizeResult(Changed, CFGChanged); 10711 } 10712 10713 PreservedAnalyses LoopVectorizePass::run(Function &F, 10714 FunctionAnalysisManager &AM) { 10715 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10716 auto &LI = AM.getResult<LoopAnalysis>(F); 10717 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10718 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10719 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10720 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10721 auto &AA = AM.getResult<AAManager>(F); 10722 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10723 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10724 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10725 10726 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10727 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10728 [&](Loop &L) -> const LoopAccessInfo & { 10729 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10730 TLI, TTI, nullptr, nullptr, nullptr}; 10731 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10732 }; 10733 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10734 ProfileSummaryInfo *PSI = 10735 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10736 LoopVectorizeResult Result = 10737 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10738 if (!Result.MadeAnyChange) 10739 return PreservedAnalyses::all(); 10740 PreservedAnalyses PA; 10741 10742 // We currently do not preserve loopinfo/dominator analyses with outer loop 10743 // vectorization. Until this is addressed, mark these analyses as preserved 10744 // only for non-VPlan-native path. 10745 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 10746 if (!EnableVPlanNativePath) { 10747 PA.preserve<LoopAnalysis>(); 10748 PA.preserve<DominatorTreeAnalysis>(); 10749 } 10750 if (!Result.MadeCFGChange) 10751 PA.preserveSet<CFGAnalyses>(); 10752 return PA; 10753 } 10754 10755 void LoopVectorizePass::printPipeline( 10756 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 10757 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline( 10758 OS, MapClassName2PassName); 10759 10760 OS << "<"; 10761 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;"; 10762 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;"; 10763 OS << ">"; 10764 } 10765