//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
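//
// As a rough illustration of the widening described above (illustrative
// pseudocode, not taken from this file): a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten so that each vector iteration handles VF elements,
// e.g. with VF = 4 and the remaining iterations left to a scalar epilogue:
//
//   for (i = 0; i + 4 <= n; i += 4)
//     a[i:i+3] = b[i:i+3] + c[i:i+3];   // one wide SIMD operation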
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
// If tail-folding fails, there are
// different fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}
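
// Illustrative behaviour of getSmallBestKnownTC (a restatement of the logic
// above, not additional functionality): a loop with a SCEV-computable constant
// trip count of 100 yields 100; a loop whose trip count is unknown but whose
// branch-weight profile estimates roughly 1000 iterations yields that estimate;
// failing both, a known small constant upper bound is returned, and None if
// nothing at all is known.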

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *
  getStepVector(Value *Val, Value *StartIdx, Value *Step,
                Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index,
                              ScalarEvolution *SE, const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison. Those
  /// recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(
      Value *Val, Value *StartIdx, Value *Step,
      Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to set up the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
                              int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
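
// Illustrative values (an assumption for exposition, not from the original
// comments): with Ty = i64 and Step = 2, a fixed VF of 4 makes createStepForVF
// return the constant i64 8, while a scalable VF of <vscale x 4> returns a
// runtime value equal to 8 * vscale, materialized via CreateVScale.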

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store.
      // Collect the recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
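
// Illustrative note: under ElementCountComparator, fixed-width VFs order
// before scalable ones, and within each kind VFs order by their known minimum
// value, e.g. 4 < 8 < (vscale x 4) < (vscale x 8).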

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
1354 /// This function takes cost-based decisions for Load/Store instructions 1355 /// and collects them in a map. This decisions map is used for building 1356 /// the lists of loop-uniform and loop-scalar instructions. 1357 /// The calculated cost is saved with widening decision in order to 1358 /// avoid redundant calculations. 1359 void setCostBasedWideningDecision(ElementCount VF); 1360 1361 /// A struct that represents some properties of the register usage 1362 /// of a loop. 1363 struct RegisterUsage { 1364 /// Holds the number of loop invariant values that are used in the loop. 1365 /// The key is ClassID of target-provided register class. 1366 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1367 /// Holds the maximum number of concurrent live intervals in the loop. 1368 /// The key is ClassID of target-provided register class. 1369 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1370 }; 1371 1372 /// \return Returns information about the register usages of the loop for the 1373 /// given vectorization factors. 1374 SmallVector<RegisterUsage, 8> 1375 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1376 1377 /// Collect values we want to ignore in the cost model. 1378 void collectValuesToIgnore(); 1379 1380 /// Collect all element types in the loop for which widening is needed. 1381 void collectElementTypesForWidening(); 1382 1383 /// Split reductions into those that happen in the loop, and those that happen 1384 /// outside. In loop reductions are collected into InLoopReductionChains. 1385 void collectInLoopReductions(); 1386 1387 /// Returns true if we should use strict in-order reductions for the given 1388 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1389 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1390 /// of FP operations. 1391 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1392 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1393 } 1394 1395 /// \returns The smallest bitwidth each instruction can be represented with. 1396 /// The vector equivalents of these instructions should be truncated to this 1397 /// type. 1398 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1399 return MinBWs; 1400 } 1401 1402 /// \returns True if it is more profitable to scalarize instruction \p I for 1403 /// vectorization factor \p VF. 1404 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1405 assert(VF.isVector() && 1406 "Profitable to scalarize relevant only for VF > 1."); 1407 1408 // Cost model is not run in the VPlan-native path - return conservative 1409 // result until this changes. 1410 if (EnableVPlanNativePath) 1411 return false; 1412 1413 auto Scalars = InstsToScalarize.find(VF); 1414 assert(Scalars != InstsToScalarize.end() && 1415 "VF not yet analyzed for scalarization profitability"); 1416 return Scalars->second.find(I) != Scalars->second.end(); 1417 } 1418 1419 /// Returns true if \p I is known to be uniform after vectorization. 1420 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1421 if (VF.isScalar()) 1422 return true; 1423 1424 // Cost model is not run in the VPlan-native path - return conservative 1425 // result until this changes. 
1426 if (EnableVPlanNativePath) 1427 return false; 1428 1429 auto UniformsPerVF = Uniforms.find(VF); 1430 assert(UniformsPerVF != Uniforms.end() && 1431 "VF not yet analyzed for uniformity"); 1432 return UniformsPerVF->second.count(I); 1433 } 1434 1435 /// Returns true if \p I is known to be scalar after vectorization. 1436 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1437 if (VF.isScalar()) 1438 return true; 1439 1440 // Cost model is not run in the VPlan-native path - return conservative 1441 // result until this changes. 1442 if (EnableVPlanNativePath) 1443 return false; 1444 1445 auto ScalarsPerVF = Scalars.find(VF); 1446 assert(ScalarsPerVF != Scalars.end() && 1447 "Scalar values are not calculated for VF"); 1448 return ScalarsPerVF->second.count(I); 1449 } 1450 1451 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1452 /// for vectorization factor \p VF. 1453 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1454 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1455 !isProfitableToScalarize(I, VF) && 1456 !isScalarAfterVectorization(I, VF); 1457 } 1458 1459 /// Decision that was taken during cost calculation for memory instruction. 1460 enum InstWidening { 1461 CM_Unknown, 1462 CM_Widen, // For consecutive accesses with stride +1. 1463 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1464 CM_Interleave, 1465 CM_GatherScatter, 1466 CM_Scalarize 1467 }; 1468 1469 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1470 /// instruction \p I and vector width \p VF. 1471 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1472 InstructionCost Cost) { 1473 assert(VF.isVector() && "Expected VF >=2"); 1474 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1475 } 1476 1477 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1478 /// interleaving group \p Grp and vector width \p VF. 1479 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1480 ElementCount VF, InstWidening W, 1481 InstructionCost Cost) { 1482 assert(VF.isVector() && "Expected VF >=2"); 1483 /// Broadcast this decicion to all instructions inside the group. 1484 /// But the cost will be assigned to one instruction only. 1485 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1486 if (auto *I = Grp->getMember(i)) { 1487 if (Grp->getInsertPos() == I) 1488 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1489 else 1490 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1491 } 1492 } 1493 } 1494 1495 /// Return the cost model decision for the given instruction \p I and vector 1496 /// width \p VF. Return CM_Unknown if this instruction did not pass 1497 /// through the cost modeling. 1498 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1499 assert(VF.isVector() && "Expected VF to be a vector VF"); 1500 // Cost model is not run in the VPlan-native path - return conservative 1501 // result until this changes. 1502 if (EnableVPlanNativePath) 1503 return CM_GatherScatter; 1504 1505 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1506 auto Itr = WideningDecisions.find(InstOnVF); 1507 if (Itr == WideningDecisions.end()) 1508 return CM_Unknown; 1509 return Itr->second.first; 1510 } 1511 1512 /// Return the vectorization cost for the given instruction \p I and vector 1513 /// width \p VF. 
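  /// Note: the decision and its cost must already have been recorded for this
  /// VF (see setCostBasedWideningDecision); the assertion below enforces this.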
1514 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1515 assert(VF.isVector() && "Expected VF >=2"); 1516 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1517 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1518 "The cost is not calculated"); 1519 return WideningDecisions[InstOnVF].second; 1520 } 1521 1522 /// Return True if instruction \p I is an optimizable truncate whose operand 1523 /// is an induction variable. Such a truncate will be removed by adding a new 1524 /// induction variable with the destination type. 1525 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1526 // If the instruction is not a truncate, return false. 1527 auto *Trunc = dyn_cast<TruncInst>(I); 1528 if (!Trunc) 1529 return false; 1530 1531 // Get the source and destination types of the truncate. 1532 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1533 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1534 1535 // If the truncate is free for the given types, return false. Replacing a 1536 // free truncate with an induction variable would add an induction variable 1537 // update instruction to each iteration of the loop. We exclude from this 1538 // check the primary induction variable since it will need an update 1539 // instruction regardless. 1540 Value *Op = Trunc->getOperand(0); 1541 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1542 return false; 1543 1544 // If the truncated value is not an induction variable, return false. 1545 return Legal->isInductionPhi(Op); 1546 } 1547 1548 /// Collects the instructions to scalarize for each predicated instruction in 1549 /// the loop. 1550 void collectInstsToScalarize(ElementCount VF); 1551 1552 /// Collect Uniform and Scalar values for the given \p VF. 1553 /// The sets depend on CM decision for Load/Store instructions 1554 /// that may be vectorized as interleave, gather-scatter or scalarized. 1555 void collectUniformsAndScalars(ElementCount VF) { 1556 // Do the analysis once. 1557 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1558 return; 1559 setCostBasedWideningDecision(VF); 1560 collectLoopUniforms(VF); 1561 collectLoopScalars(VF); 1562 } 1563 1564 /// Returns true if the target machine supports masked store operation 1565 /// for the given \p DataType and kind of access to \p Ptr. 1566 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1567 return Legal->isConsecutivePtr(DataType, Ptr) && 1568 TTI.isLegalMaskedStore(DataType, Alignment); 1569 } 1570 1571 /// Returns true if the target machine supports masked load operation 1572 /// for the given \p DataType and kind of access to \p Ptr. 1573 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1574 return Legal->isConsecutivePtr(DataType, Ptr) && 1575 TTI.isLegalMaskedLoad(DataType, Alignment); 1576 } 1577 1578 /// Returns true if the target machine can represent \p V as a masked gather 1579 /// or scatter operation. 1580 bool isLegalGatherOrScatter(Value *V) { 1581 bool LI = isa<LoadInst>(V); 1582 bool SI = isa<StoreInst>(V); 1583 if (!LI && !SI) 1584 return false; 1585 auto *Ty = getLoadStoreType(V); 1586 Align Align = getLoadStoreAlignment(V); 1587 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1588 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1589 } 1590 1591 /// Returns true if the target machine supports all of the reduction 1592 /// variables found for the given VF. 
1593 bool canVectorizeReductions(ElementCount VF) const { 1594 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1595 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1596 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1597 })); 1598 } 1599 1600 /// Returns true if \p I is an instruction that will be scalarized with 1601 /// predication. Such instructions include conditional stores and 1602 /// instructions that may divide by zero. 1603 /// If a non-zero VF has been calculated, we check if I will be scalarized 1604 /// predication for that VF. 1605 bool isScalarWithPredication(Instruction *I) const; 1606 1607 // Returns true if \p I is an instruction that will be predicated either 1608 // through scalar predication or masked load/store or masked gather/scatter. 1609 // Superset of instructions that return true for isScalarWithPredication. 1610 bool isPredicatedInst(Instruction *I, bool IsKnownUniform = false) { 1611 // When we know the load is uniform and the original scalar loop was not 1612 // predicated we don't need to mark it as a predicated instruction. Any 1613 // vectorised blocks created when tail-folding are something artificial we 1614 // have introduced and we know there is always at least one active lane. 1615 // That's why we call Legal->blockNeedsPredication here because it doesn't 1616 // query tail-folding. 1617 if (IsKnownUniform && isa<LoadInst>(I) && 1618 !Legal->blockNeedsPredication(I->getParent())) 1619 return false; 1620 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1621 return false; 1622 // Loads and stores that need some form of masked operation are predicated 1623 // instructions. 1624 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1625 return Legal->isMaskRequired(I); 1626 return isScalarWithPredication(I); 1627 } 1628 1629 /// Returns true if \p I is a memory instruction with consecutive memory 1630 /// access that can be widened. 1631 bool 1632 memoryInstructionCanBeWidened(Instruction *I, 1633 ElementCount VF = ElementCount::getFixed(1)); 1634 1635 /// Returns true if \p I is a memory instruction in an interleaved-group 1636 /// of memory accesses that can be vectorized with wide vector loads/stores 1637 /// and shuffles. 1638 bool 1639 interleavedAccessCanBeWidened(Instruction *I, 1640 ElementCount VF = ElementCount::getFixed(1)); 1641 1642 /// Check if \p Instr belongs to any interleaved access group. 1643 bool isAccessInterleaved(Instruction *Instr) { 1644 return InterleaveInfo.isInterleaved(Instr); 1645 } 1646 1647 /// Get the interleaved access group that \p Instr belongs to. 1648 const InterleaveGroup<Instruction> * 1649 getInterleavedAccessGroup(Instruction *Instr) { 1650 return InterleaveInfo.getInterleaveGroup(Instr); 1651 } 1652 1653 /// Returns true if we're required to use a scalar epilogue for at least 1654 /// the final iteration of the original loop. 1655 bool requiresScalarEpilogue(ElementCount VF) const { 1656 if (!isScalarEpilogueAllowed()) 1657 return false; 1658 // If we might exit from anywhere but the latch, must run the exiting 1659 // iteration in scalar form. 1660 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1661 return true; 1662 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1663 } 1664 1665 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1666 /// loop hint annotation. 
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
1741 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, 1742 unsigned SmallestType, 1743 unsigned WidestType, 1744 const ElementCount &MaxSafeVF); 1745 1746 /// \return the maximum legal scalable VF, based on the safe max number 1747 /// of elements. 1748 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements); 1749 1750 /// The vectorization cost is a combination of the cost itself and a boolean 1751 /// indicating whether any of the contributing operations will actually 1752 /// operate on vector values after type legalization in the backend. If this 1753 /// latter value is false, then all operations will be scalarized (i.e. no 1754 /// vectorization has actually taken place). 1755 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1756 1757 /// Returns the expected execution cost. The unit of the cost does 1758 /// not matter because we use the 'cost' units to compare different 1759 /// vector widths. The cost that is returned is *not* normalized by 1760 /// the factor width. If \p Invalid is not nullptr, this function 1761 /// will add a pair(Instruction*, ElementCount) to \p Invalid for 1762 /// each instruction that has an Invalid cost for the given VF. 1763 using InstructionVFPair = std::pair<Instruction *, ElementCount>; 1764 VectorizationCostTy 1765 expectedCost(ElementCount VF, 1766 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr); 1767 1768 /// Returns the execution time cost of an instruction for a given vector 1769 /// width. Vector width of one means scalar. 1770 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1771 1772 /// The cost-computation logic from getInstructionCost which provides 1773 /// the vector type as an output parameter. 1774 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1775 Type *&VectorTy); 1776 1777 /// Return the cost of instructions in an inloop reduction pattern, if I is 1778 /// part of that pattern. 1779 Optional<InstructionCost> 1780 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, 1781 TTI::TargetCostKind CostKind); 1782 1783 /// Calculate vectorization cost of memory instruction \p I. 1784 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1785 1786 /// The cost computation for scalarized memory instruction. 1787 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1788 1789 /// The cost computation for interleaving group of memory instructions. 1790 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1791 1792 /// The cost computation for Gather/Scatter instruction. 1793 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1794 1795 /// The cost computation for widening instruction \p I with consecutive 1796 /// memory access. 1797 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1798 1799 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1800 /// Load: scalar load + broadcast. 1801 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1802 /// element) 1803 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1804 1805 /// Estimate the overhead of scalarizing an instruction. This is a 1806 /// convenience wrapper for the type-based getScalarizationOverhead API. 
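  /// For example (illustrative), scalarizing an instruction that produces a
  /// vector result at VF 4 adds the cost of inserting the four scalar results
  /// into a vector, plus the cost of extracting the four lanes of each vector
  /// operand that feeds it.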
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be
  /// legally represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A map of in-loop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// VPlan. This was added to allow quick lookup of the in-loop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst.
The instructions to 1875 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1876 /// non-negative return value implies the expression will be scalarized. 1877 /// Currently, only single-use chains are considered for scalarization. 1878 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1879 ElementCount VF); 1880 1881 /// Collect the instructions that are uniform after vectorization. An 1882 /// instruction is uniform if we represent it with a single scalar value in 1883 /// the vectorized loop corresponding to each vector iteration. Examples of 1884 /// uniform instructions include pointer operands of consecutive or 1885 /// interleaved memory accesses. Note that although uniformity implies an 1886 /// instruction will be scalar, the reverse is not true. In general, a 1887 /// scalarized instruction will be represented by VF scalar values in the 1888 /// vectorized loop, each corresponding to an iteration of the original 1889 /// scalar loop. 1890 void collectLoopUniforms(ElementCount VF); 1891 1892 /// Collect the instructions that are scalar after vectorization. An 1893 /// instruction is scalar if it is known to be uniform or will be scalarized 1894 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1895 /// to the list if they are used by a load/store instruction that is marked as 1896 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1897 /// VF values in the vectorized loop, each corresponding to an iteration of 1898 /// the original scalar loop. 1899 void collectLoopScalars(ElementCount VF); 1900 1901 /// Keeps cost model vectorization decision and cost for instructions. 1902 /// Right now it is used for memory instructions only. 1903 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1904 std::pair<InstWidening, InstructionCost>>; 1905 1906 DecisionList WideningDecisions; 1907 1908 /// Returns true if \p V is expected to be vectorized and it needs to be 1909 /// extracted. 1910 bool needsExtract(Value *V, ElementCount VF) const { 1911 Instruction *I = dyn_cast<Instruction>(V); 1912 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1913 TheLoop->isLoopInvariant(I)) 1914 return false; 1915 1916 // Assume we can vectorize V (and hence we need extraction) if the 1917 // scalars are not computed yet. This can happen, because it is called 1918 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1919 // the scalars are collected. That should be a safe assumption in most 1920 // cases, because we check if the operands have vectorizable types 1921 // beforehand in LoopVectorizationLegality. 1922 return Scalars.find(VF) == Scalars.end() || 1923 !isScalarAfterVectorization(I, VF); 1924 }; 1925 1926 /// Returns a range containing only operands needing to be extracted. 1927 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1928 ElementCount VF) const { 1929 return SmallVector<Value *, 4>(make_filter_range( 1930 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1931 } 1932 1933 /// Determines if we have the infrastructure to vectorize loop \p L and its 1934 /// epilogue, assuming the main loop is vectorized by \p VF. 1935 bool isCandidateForEpilogueVectorization(const Loop &L, 1936 const ElementCount VF) const; 1937 1938 /// Returns true if epilogue vectorization is considered profitable, and 1939 /// false otherwise. 1940 /// \p VF is the vectorization factor chosen for the original loop. 
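  /// For example (illustrative), if the main loop was vectorized with VF 16,
  /// a narrower epilogue (say VF 4) can handle most of the remaining
  /// iterations before control reaches the scalar remainder loop.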
1941 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1942 1943 public: 1944 /// The loop that we evaluate. 1945 Loop *TheLoop; 1946 1947 /// Predicated scalar evolution analysis. 1948 PredicatedScalarEvolution &PSE; 1949 1950 /// Loop Info analysis. 1951 LoopInfo *LI; 1952 1953 /// Vectorization legality. 1954 LoopVectorizationLegality *Legal; 1955 1956 /// Vector target information. 1957 const TargetTransformInfo &TTI; 1958 1959 /// Target Library Info. 1960 const TargetLibraryInfo *TLI; 1961 1962 /// Demanded bits analysis. 1963 DemandedBits *DB; 1964 1965 /// Assumption cache. 1966 AssumptionCache *AC; 1967 1968 /// Interface to emit optimization remarks. 1969 OptimizationRemarkEmitter *ORE; 1970 1971 const Function *TheFunction; 1972 1973 /// Loop Vectorize Hint. 1974 const LoopVectorizeHints *Hints; 1975 1976 /// The interleave access information contains groups of interleaved accesses 1977 /// with the same stride and close to each other. 1978 InterleavedAccessInfo &InterleaveInfo; 1979 1980 /// Values to ignore in the cost model. 1981 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1982 1983 /// Values to ignore in the cost model when VF > 1. 1984 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1985 1986 /// All element types found in the loop. 1987 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1988 1989 /// Profitable vector factors. 1990 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1991 }; 1992 } // end namespace llvm 1993 1994 /// Helper struct to manage generating runtime checks for vectorization. 1995 /// 1996 /// The runtime checks are created up-front in temporary blocks to allow better 1997 /// estimating the cost and un-linked from the existing IR. After deciding to 1998 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1999 /// temporary blocks are completely removed. 2000 class GeneratedRTChecks { 2001 /// Basic block which contains the generated SCEV checks, if any. 2002 BasicBlock *SCEVCheckBlock = nullptr; 2003 2004 /// The value representing the result of the generated SCEV checks. If it is 2005 /// nullptr, either no SCEV checks have been generated or they have been used. 2006 Value *SCEVCheckCond = nullptr; 2007 2008 /// Basic block which contains the generated memory runtime checks, if any. 2009 BasicBlock *MemCheckBlock = nullptr; 2010 2011 /// The value representing the result of the generated memory runtime checks. 2012 /// If it is nullptr, either no memory runtime checks have been generated or 2013 /// they have been used. 2014 Value *MemRuntimeCheckCond = nullptr; 2015 2016 DominatorTree *DT; 2017 LoopInfo *LI; 2018 2019 SCEVExpander SCEVExp; 2020 SCEVExpander MemCheckExp; 2021 2022 public: 2023 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 2024 const DataLayout &DL) 2025 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 2026 MemCheckExp(SE, DL, "scev.check") {} 2027 2028 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 2029 /// accurately estimate the cost of the runtime checks. The blocks are 2030 /// un-linked from the IR and is added back during vector code generation. If 2031 /// there is no vector code generation, the check blocks are removed 2032 /// completely. 
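  /// For illustration (block names as used below, shape approximate):
  ///   vector.scevcheck: evaluates the SCEV predicate  -> SCEVCheckCond
  ///   vector.memcheck:  evaluates pointer overlap checks -> MemRuntimeCheckCond
  /// Each block is only created when the corresponding checks are required,
  /// and both are detached from the CFG here; emitSCEVChecks() and
  /// emitMemRuntimeChecks() later splice them back in front of the vector
  /// preheader, branching to the scalar loop (Bypass) when a check fails.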
2033 void Create(Loop *L, const LoopAccessInfo &LAI, 2034 const SCEVUnionPredicate &UnionPred) { 2035 2036 BasicBlock *LoopHeader = L->getHeader(); 2037 BasicBlock *Preheader = L->getLoopPreheader(); 2038 2039 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 2040 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 2041 // may be used by SCEVExpander. The blocks will be un-linked from their 2042 // predecessors and removed from LI & DT at the end of the function. 2043 if (!UnionPred.isAlwaysTrue()) { 2044 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 2045 nullptr, "vector.scevcheck"); 2046 2047 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 2048 &UnionPred, SCEVCheckBlock->getTerminator()); 2049 } 2050 2051 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 2052 if (RtPtrChecking.Need) { 2053 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 2054 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 2055 "vector.memcheck"); 2056 2057 MemRuntimeCheckCond = 2058 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 2059 RtPtrChecking.getChecks(), MemCheckExp); 2060 assert(MemRuntimeCheckCond && 2061 "no RT checks generated although RtPtrChecking " 2062 "claimed checks are required"); 2063 } 2064 2065 if (!MemCheckBlock && !SCEVCheckBlock) 2066 return; 2067 2068 // Unhook the temporary block with the checks, update various places 2069 // accordingly. 2070 if (SCEVCheckBlock) 2071 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2072 if (MemCheckBlock) 2073 MemCheckBlock->replaceAllUsesWith(Preheader); 2074 2075 if (SCEVCheckBlock) { 2076 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2077 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2078 Preheader->getTerminator()->eraseFromParent(); 2079 } 2080 if (MemCheckBlock) { 2081 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2082 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2083 Preheader->getTerminator()->eraseFromParent(); 2084 } 2085 2086 DT->changeImmediateDominator(LoopHeader, Preheader); 2087 if (MemCheckBlock) { 2088 DT->eraseNode(MemCheckBlock); 2089 LI->removeBlock(MemCheckBlock); 2090 } 2091 if (SCEVCheckBlock) { 2092 DT->eraseNode(SCEVCheckBlock); 2093 LI->removeBlock(SCEVCheckBlock); 2094 } 2095 } 2096 2097 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2098 /// unused. 2099 ~GeneratedRTChecks() { 2100 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 2101 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 2102 if (!SCEVCheckCond) 2103 SCEVCleaner.markResultUsed(); 2104 2105 if (!MemRuntimeCheckCond) 2106 MemCheckCleaner.markResultUsed(); 2107 2108 if (MemRuntimeCheckCond) { 2109 auto &SE = *MemCheckExp.getSE(); 2110 // Memory runtime check generation creates compares that use expanded 2111 // values. Remove them before running the SCEVExpanderCleaners. 
2112 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2113 if (MemCheckExp.isInsertedInstruction(&I)) 2114 continue; 2115 SE.forgetValue(&I); 2116 I.eraseFromParent(); 2117 } 2118 } 2119 MemCheckCleaner.cleanup(); 2120 SCEVCleaner.cleanup(); 2121 2122 if (SCEVCheckCond) 2123 SCEVCheckBlock->eraseFromParent(); 2124 if (MemRuntimeCheckCond) 2125 MemCheckBlock->eraseFromParent(); 2126 } 2127 2128 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2129 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2130 /// depending on the generated condition. 2131 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2132 BasicBlock *LoopVectorPreHeader, 2133 BasicBlock *LoopExitBlock) { 2134 if (!SCEVCheckCond) 2135 return nullptr; 2136 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2137 if (C->isZero()) 2138 return nullptr; 2139 2140 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2141 2142 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2143 // Create new preheader for vector loop. 2144 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2145 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2146 2147 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2148 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2149 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2150 SCEVCheckBlock); 2151 2152 DT->addNewBlock(SCEVCheckBlock, Pred); 2153 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2154 2155 ReplaceInstWithInst( 2156 SCEVCheckBlock->getTerminator(), 2157 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2158 // Mark the check as used, to prevent it from being removed during cleanup. 2159 SCEVCheckCond = nullptr; 2160 return SCEVCheckBlock; 2161 } 2162 2163 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2164 /// the branches to branch to the vector preheader or \p Bypass, depending on 2165 /// the generated condition. 2166 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2167 BasicBlock *LoopVectorPreHeader) { 2168 // Check if we generated code that checks in runtime if arrays overlap. 2169 if (!MemRuntimeCheckCond) 2170 return nullptr; 2171 2172 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2173 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2174 MemCheckBlock); 2175 2176 DT->addNewBlock(MemCheckBlock, Pred); 2177 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2178 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2179 2180 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2181 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2182 2183 ReplaceInstWithInst( 2184 MemCheckBlock->getTerminator(), 2185 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2186 MemCheckBlock->getTerminator()->setDebugLoc( 2187 Pred->getTerminator()->getDebugLoc()); 2188 2189 // Mark the check as used, to prevent it from being removed during cleanup. 2190 MemRuntimeCheckCond = nullptr; 2191 return MemCheckBlock; 2192 } 2193 }; 2194 2195 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2196 // vectorization. The loop needs to be annotated with #pragma omp simd 2197 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2198 // vector length information is not provided, vectorization is not considered 2199 // explicit. Interleave hints are not allowed either. These limitations will be 2200 // relaxed in the future. 
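// For example (illustrative source, not from this file), an outer loop such as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (i = 0; i < N; ++i)
//     for (j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
// carries the required width and is therefore treated as explicitly
// vectorizable; without the vectorize_width clause it would be skipped here.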
2201 // Please, note that we are currently forced to abuse the pragma 'clang 2202 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2203 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2204 // provides *explicit vectorization hints* (LV can bypass legal checks and 2205 // assume that vectorization is legal). However, both hints are implemented 2206 // using the same metadata (llvm.loop.vectorize, processed by 2207 // LoopVectorizeHints). This will be fixed in the future when the native IR 2208 // representation for pragma 'omp simd' is introduced. 2209 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2210 OptimizationRemarkEmitter *ORE) { 2211 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2212 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2213 2214 // Only outer loops with an explicit vectorization hint are supported. 2215 // Unannotated outer loops are ignored. 2216 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2217 return false; 2218 2219 Function *Fn = OuterLp->getHeader()->getParent(); 2220 if (!Hints.allowVectorization(Fn, OuterLp, 2221 true /*VectorizeOnlyWhenForced*/)) { 2222 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2223 return false; 2224 } 2225 2226 if (Hints.getInterleave() > 1) { 2227 // TODO: Interleave support is future work. 2228 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2229 "outer loops.\n"); 2230 Hints.emitRemarkWithHints(); 2231 return false; 2232 } 2233 2234 return true; 2235 } 2236 2237 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2238 OptimizationRemarkEmitter *ORE, 2239 SmallVectorImpl<Loop *> &V) { 2240 // Collect inner loops and outer loops without irreducible control flow. For 2241 // now, only collect outer loops that have explicit vectorization hints. If we 2242 // are stress testing the VPlan H-CFG construction, we collect the outermost 2243 // loop of every loop nest. 2244 if (L.isInnermost() || VPlanBuildStressTest || 2245 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2246 LoopBlocksRPO RPOT(&L); 2247 RPOT.perform(LI); 2248 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2249 V.push_back(&L); 2250 // TODO: Collect inner loops inside marked outer loops in case 2251 // vectorization fails for the outer loop. Do not invoke 2252 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2253 // already known to be reducible. We can use an inherited attribute for 2254 // that. 2255 return; 2256 } 2257 } 2258 for (Loop *InnerL : L) 2259 collectSupportedLoops(*InnerL, LI, ORE, V); 2260 } 2261 2262 namespace { 2263 2264 /// The LoopVectorize Pass. 
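/// This is the legacy pass-manager wrapper: it only gathers the required
/// analyses and forwards to the LoopVectorizePass implementation (Impl);
/// the new pass manager invokes LoopVectorizePass directly.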
2265 struct LoopVectorize : public FunctionPass { 2266 /// Pass identification, replacement for typeid 2267 static char ID; 2268 2269 LoopVectorizePass Impl; 2270 2271 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2272 bool VectorizeOnlyWhenForced = false) 2273 : FunctionPass(ID), 2274 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2275 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2276 } 2277 2278 bool runOnFunction(Function &F) override { 2279 if (skipFunction(F)) 2280 return false; 2281 2282 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2283 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2284 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2285 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2286 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2287 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2288 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2289 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2290 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2291 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2292 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2293 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2294 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2295 2296 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2297 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2298 2299 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2300 GetLAA, *ORE, PSI).MadeAnyChange; 2301 } 2302 2303 void getAnalysisUsage(AnalysisUsage &AU) const override { 2304 AU.addRequired<AssumptionCacheTracker>(); 2305 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2306 AU.addRequired<DominatorTreeWrapperPass>(); 2307 AU.addRequired<LoopInfoWrapperPass>(); 2308 AU.addRequired<ScalarEvolutionWrapperPass>(); 2309 AU.addRequired<TargetTransformInfoWrapperPass>(); 2310 AU.addRequired<AAResultsWrapperPass>(); 2311 AU.addRequired<LoopAccessLegacyAnalysis>(); 2312 AU.addRequired<DemandedBitsWrapperPass>(); 2313 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2314 AU.addRequired<InjectTLIMappingsLegacy>(); 2315 2316 // We currently do not preserve loopinfo/dominator analyses with outer loop 2317 // vectorization. Until this is addressed, mark these analyses as preserved 2318 // only for non-VPlan-native path. 2319 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2320 if (!EnableVPlanNativePath) { 2321 AU.addPreserved<LoopInfoWrapperPass>(); 2322 AU.addPreserved<DominatorTreeWrapperPass>(); 2323 } 2324 2325 AU.addPreserved<BasicAAWrapperPass>(); 2326 AU.addPreserved<GlobalsAAWrapperPass>(); 2327 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2328 } 2329 }; 2330 2331 } // end anonymous namespace 2332 2333 //===----------------------------------------------------------------------===// 2334 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2335 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2336 //===----------------------------------------------------------------------===// 2337 2338 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2339 // We need to place the broadcast of invariant variables outside the loop, 2340 // but only if it's proven safe to do so. Else, broadcast will be inside 2341 // vector loop body. 
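  // For example (illustrative), broadcasting an i32 %x at VF 4 produces:
  //   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i64 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                                    <4 x i32> poison, <4 x i32> zeroinitializer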
2342 Instruction *Instr = dyn_cast<Instruction>(V); 2343 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2344 (!Instr || 2345 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2346 // Place the code for broadcasting invariant variables in the new preheader. 2347 IRBuilder<>::InsertPointGuard Guard(Builder); 2348 if (SafeToHoist) 2349 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2350 2351 // Broadcast the scalar into all locations in the vector. 2352 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2353 2354 return Shuf; 2355 } 2356 2357 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2358 const InductionDescriptor &II, Value *Step, Value *Start, 2359 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2360 VPTransformState &State) { 2361 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2362 "Expected either an induction phi-node or a truncate of it!"); 2363 2364 // Construct the initial value of the vector IV in the vector loop preheader 2365 auto CurrIP = Builder.saveIP(); 2366 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2367 if (isa<TruncInst>(EntryVal)) { 2368 assert(Start->getType()->isIntegerTy() && 2369 "Truncation requires an integer type"); 2370 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2371 Step = Builder.CreateTrunc(Step, TruncType); 2372 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2373 } 2374 2375 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2376 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2377 Value *SteppedStart = 2378 getStepVector(SplatStart, Zero, Step, II.getInductionOpcode()); 2379 2380 // We create vector phi nodes for both integer and floating-point induction 2381 // variables. Here, we determine the kind of arithmetic we will perform. 2382 Instruction::BinaryOps AddOp; 2383 Instruction::BinaryOps MulOp; 2384 if (Step->getType()->isIntegerTy()) { 2385 AddOp = Instruction::Add; 2386 MulOp = Instruction::Mul; 2387 } else { 2388 AddOp = II.getInductionOpcode(); 2389 MulOp = Instruction::FMul; 2390 } 2391 2392 // Multiply the vectorization factor by the step using integer or 2393 // floating-point arithmetic as appropriate. 2394 Type *StepType = Step->getType(); 2395 Value *RuntimeVF; 2396 if (Step->getType()->isFloatingPointTy()) 2397 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, VF); 2398 else 2399 RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2400 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2401 2402 // Create a vector splat to use in the induction update. 2403 // 2404 // FIXME: If the step is non-constant, we create the vector splat with 2405 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2406 // handle a constant vector splat. 2407 Value *SplatVF = isa<Constant>(Mul) 2408 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2409 : Builder.CreateVectorSplat(VF, Mul); 2410 Builder.restoreIP(CurrIP); 2411 2412 // We may need to add the step a number of times, depending on the unroll 2413 // factor. The last of those goes into the PHI. 
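  // For example (illustrative), with a zero start value, fixed VF 4, UF 2 and
  // step S, the generated sequence is roughly:
  //   %vec.ind      = phi [ <0,S,2S,3S>, %vector.ph ], [ %vec.ind.next, latch ]
  //   %step.add     = add %vec.ind, <4S,4S,4S,4S>   ; value used for part 1
  //   %vec.ind.next = add %step.add, <4S,4S,4S,4S>  ; sunk into the latch below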
2414 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2415 &*LoopVectorBody->getFirstInsertionPt()); 2416 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2417 Instruction *LastInduction = VecInd; 2418 for (unsigned Part = 0; Part < UF; ++Part) { 2419 State.set(Def, LastInduction, Part); 2420 2421 if (isa<TruncInst>(EntryVal)) 2422 addMetadata(LastInduction, EntryVal); 2423 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2424 State, Part); 2425 2426 LastInduction = cast<Instruction>( 2427 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2428 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2429 } 2430 2431 // Move the last step to the end of the latch block. This ensures consistent 2432 // placement of all induction updates. 2433 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2434 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2435 auto *ICmp = cast<Instruction>(Br->getCondition()); 2436 LastInduction->moveBefore(ICmp); 2437 LastInduction->setName("vec.ind.next"); 2438 2439 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2440 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2441 } 2442 2443 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2444 return Cost->isScalarAfterVectorization(I, VF) || 2445 Cost->isProfitableToScalarize(I, VF); 2446 } 2447 2448 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2449 if (shouldScalarizeInstruction(IV)) 2450 return true; 2451 auto isScalarInst = [&](User *U) -> bool { 2452 auto *I = cast<Instruction>(U); 2453 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2454 }; 2455 return llvm::any_of(IV->users(), isScalarInst); 2456 } 2457 2458 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2459 const InductionDescriptor &ID, const Instruction *EntryVal, 2460 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2461 unsigned Part, unsigned Lane) { 2462 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2463 "Expected either an induction phi-node or a truncate of it!"); 2464 2465 // This induction variable is not the phi from the original loop but the 2466 // newly-created IV based on the proof that casted Phi is equal to the 2467 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2468 // re-uses the same InductionDescriptor that original IV uses but we don't 2469 // have to do any recording in this case - that is done when original IV is 2470 // processed. 2471 if (isa<TruncInst>(EntryVal)) 2472 return; 2473 2474 if (!CastDef) { 2475 assert(ID.getCastInsts().empty() && 2476 "there are casts for ID, but no CastDef"); 2477 return; 2478 } 2479 assert(!ID.getCastInsts().empty() && 2480 "there is a CastDef, but no casts for ID"); 2481 // Only the first Cast instruction in the Casts vector is of interest. 2482 // The rest of the Casts (if exist) have no uses outside the 2483 // induction update chain itself. 
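  // For example (illustrative):
  //   %iv     = phi i32 [ 0, %ph ], [ %iv.next, %latch ]
  //   %iv.ext = sext i32 %iv to i64  ; proven equal to the phi under a guard
  // Only %iv.ext, the first cast in the chain, gets the vector value recorded.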
2484 if (Lane < UINT_MAX) 2485 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2486 else 2487 State.set(CastDef, VectorLoopVal, Part); 2488 } 2489 2490 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2491 TruncInst *Trunc, VPValue *Def, 2492 VPValue *CastDef, 2493 VPTransformState &State) { 2494 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2495 "Primary induction variable must have an integer type"); 2496 2497 auto II = Legal->getInductionVars().find(IV); 2498 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2499 2500 auto ID = II->second; 2501 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2502 2503 // The value from the original loop to which we are mapping the new induction 2504 // variable. 2505 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2506 2507 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2508 2509 // Generate code for the induction step. Note that induction steps are 2510 // required to be loop-invariant 2511 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2512 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2513 "Induction step should be loop invariant"); 2514 if (PSE.getSE()->isSCEVable(IV->getType())) { 2515 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2516 return Exp.expandCodeFor(Step, Step->getType(), 2517 LoopVectorPreHeader->getTerminator()); 2518 } 2519 return cast<SCEVUnknown>(Step)->getValue(); 2520 }; 2521 2522 // The scalar value to broadcast. This is derived from the canonical 2523 // induction variable. If a truncation type is given, truncate the canonical 2524 // induction variable and step. Otherwise, derive these values from the 2525 // induction descriptor. 2526 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2527 Value *ScalarIV = Induction; 2528 if (IV != OldInduction) { 2529 ScalarIV = IV->getType()->isIntegerTy() 2530 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2531 : Builder.CreateCast(Instruction::SIToFP, Induction, 2532 IV->getType()); 2533 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2534 ScalarIV->setName("offset.idx"); 2535 } 2536 if (Trunc) { 2537 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2538 assert(Step->getType()->isIntegerTy() && 2539 "Truncation requires an integer step"); 2540 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2541 Step = Builder.CreateTrunc(Step, TruncType); 2542 } 2543 return ScalarIV; 2544 }; 2545 2546 // Create the vector values from the scalar IV, in the absence of creating a 2547 // vector IV. 2548 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2549 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2550 for (unsigned Part = 0; Part < UF; ++Part) { 2551 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2552 Value *StartIdx; 2553 if (Step->getType()->isFloatingPointTy()) 2554 StartIdx = getRuntimeVFAsFloat(Builder, Step->getType(), VF * Part); 2555 else 2556 StartIdx = getRuntimeVF(Builder, Step->getType(), VF * Part); 2557 2558 Value *EntryPart = 2559 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode()); 2560 State.set(Def, EntryPart, Part); 2561 if (Trunc) 2562 addMetadata(EntryPart, Trunc); 2563 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2564 State, Part); 2565 } 2566 }; 2567 2568 // Fast-math-flags propagate from the original induction instruction. 
2569 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2570 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2571 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2572 2573 // Now do the actual transformations, and start with creating the step value. 2574 Value *Step = CreateStepValue(ID.getStep()); 2575 if (VF.isZero() || VF.isScalar()) { 2576 Value *ScalarIV = CreateScalarIV(Step); 2577 CreateSplatIV(ScalarIV, Step); 2578 return; 2579 } 2580 2581 // Determine if we want a scalar version of the induction variable. This is 2582 // true if the induction variable itself is not widened, or if it has at 2583 // least one user in the loop that is not widened. 2584 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2585 if (!NeedsScalarIV) { 2586 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2587 State); 2588 return; 2589 } 2590 2591 // Try to create a new independent vector induction variable. If we can't 2592 // create the phi node, we will splat the scalar induction variable in each 2593 // loop iteration. 2594 if (!shouldScalarizeInstruction(EntryVal)) { 2595 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2596 State); 2597 Value *ScalarIV = CreateScalarIV(Step); 2598 // Create scalar steps that can be used by instructions we will later 2599 // scalarize. Note that the addition of the scalar steps will not increase 2600 // the number of instructions in the loop in the common case prior to 2601 // InstCombine. We will be trading one vector extract for each scalar step. 2602 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2603 return; 2604 } 2605 2606 // All IV users are scalar instructions, so only emit a scalar IV, not a 2607 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2608 // predicate used by the masked loads/stores. 2609 Value *ScalarIV = CreateScalarIV(Step); 2610 if (!Cost->isScalarEpilogueAllowed()) 2611 CreateSplatIV(ScalarIV, Step); 2612 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2613 } 2614 2615 Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx, 2616 Value *Step, 2617 Instruction::BinaryOps BinOp) { 2618 // Create and check the types. 2619 auto *ValVTy = cast<VectorType>(Val->getType()); 2620 ElementCount VLen = ValVTy->getElementCount(); 2621 2622 Type *STy = Val->getType()->getScalarType(); 2623 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2624 "Induction Step must be an integer or FP"); 2625 assert(Step->getType() == STy && "Step has wrong type"); 2626 2627 SmallVector<Constant *, 8> Indices; 2628 2629 // Create a vector of consecutive numbers from zero to VF. 2630 VectorType *InitVecValVTy = ValVTy; 2631 Type *InitVecValSTy = STy; 2632 if (STy->isFloatingPointTy()) { 2633 InitVecValSTy = 2634 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2635 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2636 } 2637 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2638 2639 // Splat the StartIdx 2640 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2641 2642 if (STy->isIntegerTy()) { 2643 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2644 Step = Builder.CreateVectorSplat(VLen, Step); 2645 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2646 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2647 // which can be found from the original scalar operations. 
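    // Illustrative result for VF 4, start index k and step s (integer case):
    //   induction = Val + (<0,1,2,3> + <k,k,k,k>) * <s,s,s,s>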
2648 Step = Builder.CreateMul(InitVec, Step); 2649 return Builder.CreateAdd(Val, Step, "induction"); 2650 } 2651 2652 // Floating point induction. 2653 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2654 "Binary Opcode should be specified for FP induction"); 2655 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2656 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2657 2658 Step = Builder.CreateVectorSplat(VLen, Step); 2659 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2660 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2661 } 2662 2663 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2664 Instruction *EntryVal, 2665 const InductionDescriptor &ID, 2666 VPValue *Def, VPValue *CastDef, 2667 VPTransformState &State) { 2668 // We shouldn't have to build scalar steps if we aren't vectorizing. 2669 assert(VF.isVector() && "VF should be greater than one"); 2670 // Get the value type and ensure it and the step have the same integer type. 2671 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2672 assert(ScalarIVTy == Step->getType() && 2673 "Val and Step should have the same type"); 2674 2675 // We build scalar steps for both integer and floating-point induction 2676 // variables. Here, we determine the kind of arithmetic we will perform. 2677 Instruction::BinaryOps AddOp; 2678 Instruction::BinaryOps MulOp; 2679 if (ScalarIVTy->isIntegerTy()) { 2680 AddOp = Instruction::Add; 2681 MulOp = Instruction::Mul; 2682 } else { 2683 AddOp = ID.getInductionOpcode(); 2684 MulOp = Instruction::FMul; 2685 } 2686 2687 // Determine the number of scalars we need to generate for each unroll 2688 // iteration. If EntryVal is uniform, we only need to generate the first 2689 // lane. Otherwise, we generate all VF values. 2690 bool IsUniform = 2691 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2692 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2693 // Compute the scalar steps and save the results in State. 2694 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2695 ScalarIVTy->getScalarSizeInBits()); 2696 Type *VecIVTy = nullptr; 2697 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2698 if (!IsUniform && VF.isScalable()) { 2699 VecIVTy = VectorType::get(ScalarIVTy, VF); 2700 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2701 SplatStep = Builder.CreateVectorSplat(VF, Step); 2702 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2703 } 2704 2705 for (unsigned Part = 0; Part < UF; ++Part) { 2706 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, VF, Part); 2707 2708 if (!IsUniform && VF.isScalable()) { 2709 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2710 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2711 if (ScalarIVTy->isFloatingPointTy()) 2712 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2713 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2714 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2715 State.set(Def, Add, Part); 2716 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2717 Part); 2718 // It's useful to record the lane values too for the known minimum number 2719 // of elements so we do those below. This improves the code quality when 2720 // trying to extract the first element, for example. 
2721 } 2722 2723 if (ScalarIVTy->isFloatingPointTy()) 2724 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2725 2726 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2727 Value *StartIdx = Builder.CreateBinOp( 2728 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2729 // The step returned by `createStepForVF` is a runtime-evaluated value 2730 // when VF is scalable. Otherwise, it should be folded into a Constant. 2731 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2732 "Expected StartIdx to be folded to a constant when VF is not " 2733 "scalable"); 2734 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2735 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2736 State.set(Def, Add, VPIteration(Part, Lane)); 2737 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2738 Part, Lane); 2739 } 2740 } 2741 } 2742 2743 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2744 const VPIteration &Instance, 2745 VPTransformState &State) { 2746 Value *ScalarInst = State.get(Def, Instance); 2747 Value *VectorValue = State.get(Def, Instance.Part); 2748 VectorValue = Builder.CreateInsertElement( 2749 VectorValue, ScalarInst, 2750 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2751 State.set(Def, VectorValue, Instance.Part); 2752 } 2753 2754 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2755 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2756 return Builder.CreateVectorReverse(Vec, "reverse"); 2757 } 2758 2759 // Return whether we allow using masked interleave-groups (for dealing with 2760 // strided loads/stores that reside in predicated blocks, or for dealing 2761 // with gaps). 2762 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2763 // If an override option has been passed in for interleaved accesses, use it. 2764 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2765 return EnableMaskedInterleavedMemAccesses; 2766 2767 return TTI.enableMaskedInterleavedAccessVectorization(); 2768 } 2769 2770 // Try to vectorize the interleave group that \p Instr belongs to. 2771 // 2772 // E.g. Translate following interleaved load group (factor = 3): 2773 // for (i = 0; i < N; i+=3) { 2774 // R = Pic[i]; // Member of index 0 2775 // G = Pic[i+1]; // Member of index 1 2776 // B = Pic[i+2]; // Member of index 2 2777 // ... // do something to R, G, B 2778 // } 2779 // To: 2780 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2781 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2782 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2783 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2784 // 2785 // Or translate following interleaved store group (factor = 3): 2786 // for (i = 0; i < N; i+=3) { 2787 // ... 
do something to R, G, B 2788 // Pic[i] = R; // Member of index 0 2789 // Pic[i+1] = G; // Member of index 1 2790 // Pic[i+2] = B; // Member of index 2 2791 // } 2792 // To: 2793 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2794 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2795 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2796 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2797 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2798 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2799 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2800 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2801 VPValue *BlockInMask) { 2802 Instruction *Instr = Group->getInsertPos(); 2803 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2804 2805 // Prepare for the vector type of the interleaved load/store. 2806 Type *ScalarTy = getLoadStoreType(Instr); 2807 unsigned InterleaveFactor = Group->getFactor(); 2808 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2809 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2810 2811 // Prepare for the new pointers. 2812 SmallVector<Value *, 2> AddrParts; 2813 unsigned Index = Group->getIndex(Instr); 2814 2815 // TODO: extend the masked interleaved-group support to reversed access. 2816 assert((!BlockInMask || !Group->isReverse()) && 2817 "Reversed masked interleave-group not supported."); 2818 2819 // If the group is reverse, adjust the index to refer to the last vector lane 2820 // instead of the first. We adjust the index from the first vector lane, 2821 // rather than directly getting the pointer for lane VF - 1, because the 2822 // pointer operand of the interleaved access is supposed to be uniform. For 2823 // uniform instructions, we're only required to generate a value for the 2824 // first vector lane in each unroll iteration. 2825 if (Group->isReverse()) 2826 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2827 2828 for (unsigned Part = 0; Part < UF; Part++) { 2829 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2830 setDebugLocFromInst(AddrPart); 2831 2832 // Notice current instruction could be any index. Need to adjust the address 2833 // to the member of index 0. 2834 // 2835 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2836 // b = A[i]; // Member of index 0 2837 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2838 // 2839 // E.g. A[i+1] = a; // Member of index 1 2840 // A[i] = b; // Member of index 0 2841 // A[i+2] = c; // Member of index 2 (Current instruction) 2842 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2843 2844 bool InBounds = false; 2845 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2846 InBounds = gep->isInBounds(); 2847 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2848 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2849 2850 // Cast to the vector pointer type. 
2851 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2852 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2853 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2854 } 2855 2856 setDebugLocFromInst(Instr); 2857 Value *PoisonVec = PoisonValue::get(VecTy); 2858 2859 Value *MaskForGaps = nullptr; 2860 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2861 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2862 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2863 } 2864 2865 // Vectorize the interleaved load group. 2866 if (isa<LoadInst>(Instr)) { 2867 // For each unroll part, create a wide load for the group. 2868 SmallVector<Value *, 2> NewLoads; 2869 for (unsigned Part = 0; Part < UF; Part++) { 2870 Instruction *NewLoad; 2871 if (BlockInMask || MaskForGaps) { 2872 assert(useMaskedInterleavedAccesses(*TTI) && 2873 "masked interleaved groups are not allowed."); 2874 Value *GroupMask = MaskForGaps; 2875 if (BlockInMask) { 2876 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2877 Value *ShuffledMask = Builder.CreateShuffleVector( 2878 BlockInMaskPart, 2879 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2880 "interleaved.mask"); 2881 GroupMask = MaskForGaps 2882 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2883 MaskForGaps) 2884 : ShuffledMask; 2885 } 2886 NewLoad = 2887 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2888 GroupMask, PoisonVec, "wide.masked.vec"); 2889 } 2890 else 2891 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2892 Group->getAlign(), "wide.vec"); 2893 Group->addMetadata(NewLoad); 2894 NewLoads.push_back(NewLoad); 2895 } 2896 2897 // For each member in the group, shuffle out the appropriate data from the 2898 // wide loads. 2899 unsigned J = 0; 2900 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2901 Instruction *Member = Group->getMember(I); 2902 2903 // Skip the gaps in the group. 2904 if (!Member) 2905 continue; 2906 2907 auto StrideMask = 2908 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2909 for (unsigned Part = 0; Part < UF; Part++) { 2910 Value *StridedVec = Builder.CreateShuffleVector( 2911 NewLoads[Part], StrideMask, "strided.vec"); 2912 2913 // If this member has different type, cast the result type. 2914 if (Member->getType() != ScalarTy) { 2915 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2916 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2917 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2918 } 2919 2920 if (Group->isReverse()) 2921 StridedVec = reverseVector(StridedVec); 2922 2923 State.set(VPDefs[J], StridedVec, Part); 2924 } 2925 ++J; 2926 } 2927 return; 2928 } 2929 2930 // The sub vector type for current instruction. 2931 auto *SubVT = VectorType::get(ScalarTy, VF); 2932 2933 // Vectorize the interleaved store group. 2934 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2935 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2936 "masked interleaved groups are not allowed."); 2937 assert((!MaskForGaps || !VF.isScalable()) && 2938 "masking gaps for scalable vectors is not yet supported."); 2939 for (unsigned Part = 0; Part < UF; Part++) { 2940 // Collect the stored vector from each member. 
2941 SmallVector<Value *, 4> StoredVecs; 2942 for (unsigned i = 0; i < InterleaveFactor; i++) { 2943 assert((Group->getMember(i) || MaskForGaps) && 2944 "Fail to get a member from an interleaved store group"); 2945 Instruction *Member = Group->getMember(i); 2946 2947 // Skip the gaps in the group. 2948 if (!Member) { 2949 Value *Undef = PoisonValue::get(SubVT); 2950 StoredVecs.push_back(Undef); 2951 continue; 2952 } 2953 2954 Value *StoredVec = State.get(StoredValues[i], Part); 2955 2956 if (Group->isReverse()) 2957 StoredVec = reverseVector(StoredVec); 2958 2959 // If this member has different type, cast it to a unified type. 2960 2961 if (StoredVec->getType() != SubVT) 2962 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2963 2964 StoredVecs.push_back(StoredVec); 2965 } 2966 2967 // Concatenate all vectors into a wide vector. 2968 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2969 2970 // Interleave the elements in the wide vector. 2971 Value *IVec = Builder.CreateShuffleVector( 2972 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2973 "interleaved.vec"); 2974 2975 Instruction *NewStoreInstr; 2976 if (BlockInMask || MaskForGaps) { 2977 Value *GroupMask = MaskForGaps; 2978 if (BlockInMask) { 2979 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2980 Value *ShuffledMask = Builder.CreateShuffleVector( 2981 BlockInMaskPart, 2982 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2983 "interleaved.mask"); 2984 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2985 ShuffledMask, MaskForGaps) 2986 : ShuffledMask; 2987 } 2988 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2989 Group->getAlign(), GroupMask); 2990 } else 2991 NewStoreInstr = 2992 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2993 2994 Group->addMetadata(NewStoreInstr); 2995 } 2996 } 2997 2998 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2999 VPReplicateRecipe *RepRecipe, 3000 const VPIteration &Instance, 3001 bool IfPredicateInstr, 3002 VPTransformState &State) { 3003 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3004 3005 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3006 // the first lane and part. 3007 if (isa<NoAliasScopeDeclInst>(Instr)) 3008 if (!Instance.isFirstIteration()) 3009 return; 3010 3011 setDebugLocFromInst(Instr); 3012 3013 // Does this instruction return a value ? 3014 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3015 3016 Instruction *Cloned = Instr->clone(); 3017 if (!IsVoidRetTy) 3018 Cloned->setName(Instr->getName() + ".cloned"); 3019 3020 // If the scalarized instruction contributes to the address computation of a 3021 // widen masked load/store which was in a basic block that needed predication 3022 // and is not predicated after vectorization, we can't propagate 3023 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 3024 // instruction could feed a poison value to the base address of the widen 3025 // load/store. 3026 if (State.MayGeneratePoisonRecipes.count(RepRecipe) > 0) 3027 Cloned->dropPoisonGeneratingFlags(); 3028 3029 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3030 Builder.GetInsertPoint()); 3031 // Replace the operands of the cloned instructions with their scalar 3032 // equivalents in the new loop. 
3033 for (auto &I : enumerate(RepRecipe->operands())) { 3034 auto InputInstance = Instance; 3035 VPValue *Operand = I.value(); 3036 if (State.Plan->isUniformAfterVectorization(Operand)) 3037 InputInstance.Lane = VPLane::getFirstLane(); 3038 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 3039 } 3040 addNewMetadata(Cloned, Instr); 3041 3042 // Place the cloned scalar in the new loop. 3043 Builder.Insert(Cloned); 3044 3045 State.set(RepRecipe, Cloned, Instance); 3046 3047 // If we just cloned a new assumption, add it the assumption cache. 3048 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3049 AC->registerAssumption(II); 3050 3051 // End if-block. 3052 if (IfPredicateInstr) 3053 PredicatedInstructions.push_back(Cloned); 3054 } 3055 3056 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3057 Value *End, Value *Step, 3058 Instruction *DL) { 3059 BasicBlock *Header = L->getHeader(); 3060 BasicBlock *Latch = L->getLoopLatch(); 3061 // As we're just creating this loop, it's possible no latch exists 3062 // yet. If so, use the header as this will be a single block loop. 3063 if (!Latch) 3064 Latch = Header; 3065 3066 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3067 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3068 setDebugLocFromInst(OldInst, &B); 3069 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3070 3071 B.SetInsertPoint(Latch->getTerminator()); 3072 setDebugLocFromInst(OldInst, &B); 3073 3074 // Create i+1 and fill the PHINode. 3075 // 3076 // If the tail is not folded, we know that End - Start >= Step (either 3077 // statically or through the minimum iteration checks). We also know that both 3078 // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + 3079 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3080 // overflows and we can mark the induction increment as NUW. 3081 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3082 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3083 Induction->addIncoming(Start, L->getLoopPreheader()); 3084 Induction->addIncoming(Next, Latch); 3085 // Create the compare. 3086 Value *ICmp = B.CreateICmpEQ(Next, End); 3087 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3088 3089 // Now we have two terminators. Remove the old one from the block. 3090 Latch->getTerminator()->eraseFromParent(); 3091 3092 return Induction; 3093 } 3094 3095 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3096 if (TripCount) 3097 return TripCount; 3098 3099 assert(L && "Create Trip Count for null loop."); 3100 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3101 // Find the loop boundaries. 3102 ScalarEvolution *SE = PSE.getSE(); 3103 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3104 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3105 "Invalid loop count"); 3106 3107 Type *IdxTy = Legal->getWidestInductionType(); 3108 assert(IdxTy && "No type for induction"); 3109 3110 // The exit count might have the type of i64 while the phi is i32. This can 3111 // happen if we have an induction variable that is sign extended before the 3112 // compare. The only way that we get a backedge taken count is that the 3113 // induction variable was signed and as such will not overflow. In such a case 3114 // truncation is legal. 
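// For example (hypothetical IR): an i32 induction variable that is sign
// extended to i64 for the latch compare yields an i64 backedge-taken count
// while the widest induction type is i32; because the signed IV cannot wrap,
// truncating the count back to i32 below is safe.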
3115 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3116 IdxTy->getPrimitiveSizeInBits()) 3117 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3118 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3119 3120 // Get the total trip count from the count by adding 1. 3121 const SCEV *ExitCount = SE->getAddExpr( 3122 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3123 3124 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3125 3126 // Expand the trip count and place the new instructions in the preheader. 3127 // Notice that the pre-header does not change, only the loop body. 3128 SCEVExpander Exp(*SE, DL, "induction"); 3129 3130 // Count holds the overall loop count (N). 3131 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3132 L->getLoopPreheader()->getTerminator()); 3133 3134 if (TripCount->getType()->isPointerTy()) 3135 TripCount = 3136 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3137 L->getLoopPreheader()->getTerminator()); 3138 3139 return TripCount; 3140 } 3141 3142 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3143 if (VectorTripCount) 3144 return VectorTripCount; 3145 3146 Value *TC = getOrCreateTripCount(L); 3147 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3148 3149 Type *Ty = TC->getType(); 3150 // This is where we can make the step a runtime constant. 3151 Value *Step = createStepForVF(Builder, Ty, VF, UF); 3152 3153 // If the tail is to be folded by masking, round the number of iterations N 3154 // up to a multiple of Step instead of rounding down. This is done by first 3155 // adding Step-1 and then rounding down. Note that it's ok if this addition 3156 // overflows: the vector induction variable will eventually wrap to zero given 3157 // that it starts at zero and its Step is a power of two; the loop will then 3158 // exit, with the last early-exit vector comparison also producing all-true. 3159 if (Cost->foldTailByMasking()) { 3160 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3161 "VF*UF must be a power of 2 when folding tail by masking"); 3162 assert(!VF.isScalable() && 3163 "Tail folding not yet supported for scalable vectors"); 3164 TC = Builder.CreateAdd( 3165 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3166 } 3167 3168 // Now we need to generate the expression for the part of the loop that the 3169 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3170 // iterations are not required for correctness, or N - Step, otherwise. Step 3171 // is equal to the vectorization factor (number of SIMD elements) times the 3172 // unroll factor (number of SIMD instructions). 3173 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3174 3175 // There are cases where we *must* run at least one iteration in the remainder 3176 // loop. See the cost model for when this can happen. If the step evenly 3177 // divides the trip count, we set the remainder to be equal to the step. If 3178 // the step does not evenly divide the trip count, no adjustment is necessary 3179 // since there will already be scalar iterations. Note that the minimum 3180 // iterations check ensures that N >= Step. 
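// Worked example (made-up numbers): with trip count N = 10 and Step = VF * UF
// = 4, R = 10 % 4 = 2 and the vector trip count becomes 8, leaving 2 scalar
// iterations. If a scalar epilogue is required and N = 8, R would be 0, so it
// is bumped to Step below and the vector trip count becomes 4, which
// guarantees the epilogue runs at least one iteration.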
3181 if (Cost->requiresScalarEpilogue(VF)) { 3182 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3183 R = Builder.CreateSelect(IsZero, Step, R); 3184 } 3185 3186 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3187 3188 return VectorTripCount; 3189 } 3190 3191 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3192 const DataLayout &DL) { 3193 // Verify that V is a vector type with same number of elements as DstVTy. 3194 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3195 unsigned VF = DstFVTy->getNumElements(); 3196 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3197 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3198 Type *SrcElemTy = SrcVecTy->getElementType(); 3199 Type *DstElemTy = DstFVTy->getElementType(); 3200 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3201 "Vector elements must have same size"); 3202 3203 // Do a direct cast if element types are castable. 3204 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3205 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3206 } 3207 // V cannot be directly casted to desired vector type. 3208 // May happen when V is a floating point vector but DstVTy is a vector of 3209 // pointers or vice-versa. Handle this using a two-step bitcast using an 3210 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3211 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3212 "Only one type should be a pointer type"); 3213 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3214 "Only one type should be a floating point type"); 3215 Type *IntTy = 3216 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3217 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3218 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3219 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3220 } 3221 3222 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3223 BasicBlock *Bypass) { 3224 Value *Count = getOrCreateTripCount(L); 3225 // Reuse existing vector loop preheader for TC checks. 3226 // Note that new preheader block is generated for vector loop. 3227 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3228 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3229 3230 // Generate code to check if the loop's trip count is less than VF * UF, or 3231 // equal to it in case a scalar epilogue is required; this implies that the 3232 // vector trip count is zero. This check also covers the case where adding one 3233 // to the backedge-taken count overflowed leading to an incorrect trip count 3234 // of zero. In this case we will also jump to the scalar loop. 3235 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3236 : ICmpInst::ICMP_ULT; 3237 3238 // If tail is to be folded, vector loop takes care of all iterations. 3239 Value *CheckMinIters = Builder.getFalse(); 3240 if (!Cost->foldTailByMasking()) { 3241 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3242 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3243 } 3244 // Create new preheader for vector loop. 
3245 LoopVectorPreHeader = 3246 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3247 "vector.ph"); 3248 3249 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3250 DT->getNode(Bypass)->getIDom()) && 3251 "TC check is expected to dominate Bypass"); 3252 3253 // Update dominator for Bypass & LoopExit (if needed). 3254 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3255 if (!Cost->requiresScalarEpilogue(VF)) 3256 // If there is an epilogue which must run, there's no edge from the 3257 // middle block to exit blocks and thus no need to update the immediate 3258 // dominator of the exit blocks. 3259 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3260 3261 ReplaceInstWithInst( 3262 TCCheckBlock->getTerminator(), 3263 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3264 LoopBypassBlocks.push_back(TCCheckBlock); 3265 } 3266 3267 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3268 3269 BasicBlock *const SCEVCheckBlock = 3270 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3271 if (!SCEVCheckBlock) 3272 return nullptr; 3273 3274 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3275 (OptForSizeBasedOnProfile && 3276 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3277 "Cannot SCEV check stride or overflow when optimizing for size"); 3278 3279 3280 // Update dominator only if this is first RT check. 3281 if (LoopBypassBlocks.empty()) { 3282 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3283 if (!Cost->requiresScalarEpilogue(VF)) 3284 // If there is an epilogue which must run, there's no edge from the 3285 // middle block to exit blocks and thus no need to update the immediate 3286 // dominator of the exit blocks. 3287 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3288 } 3289 3290 LoopBypassBlocks.push_back(SCEVCheckBlock); 3291 AddedSafetyChecks = true; 3292 return SCEVCheckBlock; 3293 } 3294 3295 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3296 BasicBlock *Bypass) { 3297 // VPlan-native path does not do any analysis for runtime checks currently. 3298 if (EnableVPlanNativePath) 3299 return nullptr; 3300 3301 BasicBlock *const MemCheckBlock = 3302 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3303 3304 // Check if we generated code that checks in runtime if arrays overlap. We put 3305 // the checks into a separate block to make the more common case of few 3306 // elements faster. 3307 if (!MemCheckBlock) 3308 return nullptr; 3309 3310 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3311 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3312 "Cannot emit memory checks when optimizing for size, unless forced " 3313 "to vectorize."); 3314 ORE->emit([&]() { 3315 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3316 L->getStartLoc(), L->getHeader()) 3317 << "Code-size may be reduced by not forcing " 3318 "vectorization, or by source-code modifications " 3319 "eliminating the need for runtime checks " 3320 "(e.g., adding 'restrict')."; 3321 }); 3322 } 3323 3324 LoopBypassBlocks.push_back(MemCheckBlock); 3325 3326 AddedSafetyChecks = true; 3327 3328 // We currently don't use LoopVersioning for the actual loop cloning but we 3329 // still use it to add the noalias metadata. 
3330 LVer = std::make_unique<LoopVersioning>( 3331 *Legal->getLAI(), 3332 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3333 DT, PSE.getSE()); 3334 LVer->prepareNoAliasMetadata(); 3335 return MemCheckBlock; 3336 } 3337 3338 Value *InnerLoopVectorizer::emitTransformedIndex( 3339 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3340 const InductionDescriptor &ID) const { 3341 3342 SCEVExpander Exp(*SE, DL, "induction"); 3343 auto Step = ID.getStep(); 3344 auto StartValue = ID.getStartValue(); 3345 assert(Index->getType()->getScalarType() == Step->getType() && 3346 "Index scalar type does not match StepValue type"); 3347 3348 // Note: the IR at this point is broken. We cannot use SE to create any new 3349 // SCEV and then expand it, hoping that SCEV's simplification will give us 3350 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3351 // lead to various SCEV crashes. So all we can do is to use builder and rely 3352 // on InstCombine for future simplifications. Here we handle some trivial 3353 // cases only. 3354 auto CreateAdd = [&B](Value *X, Value *Y) { 3355 assert(X->getType() == Y->getType() && "Types don't match!"); 3356 if (auto *CX = dyn_cast<ConstantInt>(X)) 3357 if (CX->isZero()) 3358 return Y; 3359 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3360 if (CY->isZero()) 3361 return X; 3362 return B.CreateAdd(X, Y); 3363 }; 3364 3365 // We allow X to be a vector type, in which case Y will potentially be 3366 // splatted into a vector with the same element count. 3367 auto CreateMul = [&B](Value *X, Value *Y) { 3368 assert(X->getType()->getScalarType() == Y->getType() && 3369 "Types don't match!"); 3370 if (auto *CX = dyn_cast<ConstantInt>(X)) 3371 if (CX->isOne()) 3372 return Y; 3373 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3374 if (CY->isOne()) 3375 return X; 3376 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3377 if (XVTy && !isa<VectorType>(Y->getType())) 3378 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3379 return B.CreateMul(X, Y); 3380 }; 3381 3382 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3383 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3384 // the DomTree is not kept up-to-date for additional blocks generated in the 3385 // vector loop. By using the header as insertion point, we guarantee that the 3386 // expanded instructions dominate all their uses. 
3387 auto GetInsertPoint = [this, &B]() {
3388 BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3389 if (InsertBB != LoopVectorBody &&
3390 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3391 return LoopVectorBody->getTerminator();
3392 return &*B.GetInsertPoint();
3393 };
3394 
3395 switch (ID.getKind()) {
3396 case InductionDescriptor::IK_IntInduction: {
3397 assert(!isa<VectorType>(Index->getType()) &&
3398 "Vector indices not supported for integer inductions yet");
3399 assert(Index->getType() == StartValue->getType() &&
3400 "Index type does not match StartValue type");
3401 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3402 return B.CreateSub(StartValue, Index);
3403 auto *Offset = CreateMul(
3404 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3405 return CreateAdd(StartValue, Offset);
3406 }
3407 case InductionDescriptor::IK_PtrInduction: {
3408 assert(isa<SCEVConstant>(Step) &&
3409 "Expected constant step for pointer induction");
3410 return B.CreateGEP(
3411 ID.getElementType(), StartValue,
3412 CreateMul(Index,
3413 Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3414 GetInsertPoint())));
3415 }
3416 case InductionDescriptor::IK_FpInduction: {
3417 assert(!isa<VectorType>(Index->getType()) &&
3418 "Vector indices not supported for FP inductions yet");
3419 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3420 auto InductionBinOp = ID.getInductionBinOp();
3421 assert(InductionBinOp &&
3422 (InductionBinOp->getOpcode() == Instruction::FAdd ||
3423 InductionBinOp->getOpcode() == Instruction::FSub) &&
3424 "Original bin op should be defined for FP induction");
3425 
3426 Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3427 Value *MulExp = B.CreateFMul(StepValue, Index);
3428 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3429 "induction");
3430 }
3431 case InductionDescriptor::IK_NoInduction:
3432 return nullptr;
3433 }
3434 llvm_unreachable("invalid enum");
3435 }
3436 
3437 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3438 LoopScalarBody = OrigLoop->getHeader();
3439 LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3440 assert(LoopVectorPreHeader && "Invalid loop structure");
3441 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3442 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3443 "multiple exit loop without required epilogue?");
3444 
3445 LoopMiddleBlock =
3446 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3447 LI, nullptr, Twine(Prefix) + "middle.block");
3448 LoopScalarPreHeader =
3449 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3450 nullptr, Twine(Prefix) + "scalar.ph");
3451 
3452 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3453 
3454 // Set up the middle block terminator. Two cases:
3455 // 1) If we know that we must execute the scalar epilogue, emit an
3456 // unconditional branch.
3457 // 2) Otherwise, we must have a single unique exit block (due to how we
3458 // implement the multiple exit case). In this case, set up a conditional
3459 // branch from the middle block to the loop scalar preheader, and the
3460 // exit block. completeLoopSkeleton will update the condition to use an
3461 // iteration check, if required to decide whether to execute the remainder.
3462 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3463 BranchInst::Create(LoopScalarPreHeader) : 3464 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3465 Builder.getTrue()); 3466 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3467 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3468 3469 // We intentionally don't let SplitBlock to update LoopInfo since 3470 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3471 // LoopVectorBody is explicitly added to the correct place few lines later. 3472 LoopVectorBody = 3473 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3474 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3475 3476 // Update dominator for loop exit. 3477 if (!Cost->requiresScalarEpilogue(VF)) 3478 // If there is an epilogue which must run, there's no edge from the 3479 // middle block to exit blocks and thus no need to update the immediate 3480 // dominator of the exit blocks. 3481 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3482 3483 // Create and register the new vector loop. 3484 Loop *Lp = LI->AllocateLoop(); 3485 Loop *ParentLoop = OrigLoop->getParentLoop(); 3486 3487 // Insert the new loop into the loop nest and register the new basic blocks 3488 // before calling any utilities such as SCEV that require valid LoopInfo. 3489 if (ParentLoop) { 3490 ParentLoop->addChildLoop(Lp); 3491 } else { 3492 LI->addTopLevelLoop(Lp); 3493 } 3494 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3495 return Lp; 3496 } 3497 3498 void InnerLoopVectorizer::createInductionResumeValues( 3499 Loop *L, Value *VectorTripCount, 3500 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3501 assert(VectorTripCount && L && "Expected valid arguments"); 3502 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3503 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3504 "Inconsistent information about additional bypass."); 3505 // We are going to resume the execution of the scalar loop. 3506 // Go over all of the induction variables that we found and fix the 3507 // PHIs that are left in the scalar version of the loop. 3508 // The starting values of PHI nodes depend on the counter of the last 3509 // iteration in the vectorized loop. 3510 // If we come from a bypass edge then we need to start from the original 3511 // start value. 3512 for (auto &InductionEntry : Legal->getInductionVars()) { 3513 PHINode *OrigPhi = InductionEntry.first; 3514 InductionDescriptor II = InductionEntry.second; 3515 3516 // Create phi nodes to merge from the backedge-taken check block. 3517 PHINode *BCResumeVal = 3518 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3519 LoopScalarPreHeader->getTerminator()); 3520 // Copy original phi DL over to the new one. 3521 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3522 Value *&EndValue = IVEndValues[OrigPhi]; 3523 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3524 if (OrigPhi == OldInduction) { 3525 // We know what the end value is. 3526 EndValue = VectorTripCount; 3527 } else { 3528 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3529 3530 // Fast-math-flags propagate from the original induction instruction. 
3531 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3532 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3533 3534 Type *StepType = II.getStep()->getType(); 3535 Instruction::CastOps CastOp = 3536 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3537 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3538 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3539 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3540 EndValue->setName("ind.end"); 3541 3542 // Compute the end value for the additional bypass (if applicable). 3543 if (AdditionalBypass.first) { 3544 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3545 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3546 StepType, true); 3547 CRD = 3548 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3549 EndValueFromAdditionalBypass = 3550 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3551 EndValueFromAdditionalBypass->setName("ind.end"); 3552 } 3553 } 3554 // The new PHI merges the original incoming value, in case of a bypass, 3555 // or the value at the end of the vectorized loop. 3556 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3557 3558 // Fix the scalar body counter (PHI node). 3559 // The old induction's phi node in the scalar body needs the truncated 3560 // value. 3561 for (BasicBlock *BB : LoopBypassBlocks) 3562 BCResumeVal->addIncoming(II.getStartValue(), BB); 3563 3564 if (AdditionalBypass.first) 3565 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3566 EndValueFromAdditionalBypass); 3567 3568 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3569 } 3570 } 3571 3572 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3573 MDNode *OrigLoopID) { 3574 assert(L && "Expected valid loop."); 3575 3576 // The trip counts should be cached by now. 3577 Value *Count = getOrCreateTripCount(L); 3578 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3579 3580 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3581 3582 // Add a check in the middle block to see if we have completed 3583 // all of the iterations in the first vector loop. Three cases: 3584 // 1) If we require a scalar epilogue, there is no conditional branch as 3585 // we unconditionally branch to the scalar preheader. Do nothing. 3586 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3587 // Thus if tail is to be folded, we know we don't need to run the 3588 // remainder and we can use the previous value for the condition (true). 3589 // 3) Otherwise, construct a runtime check. 3590 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3591 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3592 Count, VectorTripCount, "cmp.n", 3593 LoopMiddleBlock->getTerminator()); 3594 3595 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3596 // of the corresponding compare because they may have ended up with 3597 // different line numbers and we want to avoid awkward line stepping while 3598 // debugging. Eg. if the compare has got a line number inside the loop. 3599 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3600 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3601 } 3602 3603 // Get ready to start creating new instructions into the vectorized body. 
3604 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3605 "Inconsistent vector loop preheader"); 3606 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3607 3608 Optional<MDNode *> VectorizedLoopID = 3609 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3610 LLVMLoopVectorizeFollowupVectorized}); 3611 if (VectorizedLoopID.hasValue()) { 3612 L->setLoopID(VectorizedLoopID.getValue()); 3613 3614 // Do not setAlreadyVectorized if loop attributes have been defined 3615 // explicitly. 3616 return LoopVectorPreHeader; 3617 } 3618 3619 // Keep all loop hints from the original loop on the vector loop (we'll 3620 // replace the vectorizer-specific hints below). 3621 if (MDNode *LID = OrigLoop->getLoopID()) 3622 L->setLoopID(LID); 3623 3624 LoopVectorizeHints Hints(L, true, *ORE); 3625 Hints.setAlreadyVectorized(); 3626 3627 #ifdef EXPENSIVE_CHECKS 3628 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3629 LI->verify(*DT); 3630 #endif 3631 3632 return LoopVectorPreHeader; 3633 } 3634 3635 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3636 /* 3637 In this function we generate a new loop. The new loop will contain 3638 the vectorized instructions while the old loop will continue to run the 3639 scalar remainder. 3640 3641 [ ] <-- loop iteration number check. 3642 / | 3643 / v 3644 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3645 | / | 3646 | / v 3647 || [ ] <-- vector pre header. 3648 |/ | 3649 | v 3650 | [ ] \ 3651 | [ ]_| <-- vector loop. 3652 | | 3653 | v 3654 \ -[ ] <--- middle-block. 3655 \/ | 3656 /\ v 3657 | ->[ ] <--- new preheader. 3658 | | 3659 (opt) v <-- edge from middle to exit iff epilogue is not required. 3660 | [ ] \ 3661 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3662 \ | 3663 \ v 3664 >[ ] <-- exit block(s). 3665 ... 3666 */ 3667 3668 // Get the metadata of the original loop before it gets modified. 3669 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3670 3671 // Workaround! Compute the trip count of the original loop and cache it 3672 // before we start modifying the CFG. This code has a systemic problem 3673 // wherein it tries to run analysis over partially constructed IR; this is 3674 // wrong, and not simply for SCEV. The trip count of the original loop 3675 // simply happens to be prone to hitting this in practice. In theory, we 3676 // can hit the same issue for any SCEV, or ValueTracking query done during 3677 // mutation. See PR49900. 3678 getOrCreateTripCount(OrigLoop); 3679 3680 // Create an empty vector loop, and prepare basic blocks for the runtime 3681 // checks. 3682 Loop *Lp = createVectorLoopSkeleton(""); 3683 3684 // Now, compare the new count to zero. If it is zero skip the vector loop and 3685 // jump to the scalar loop. This check also covers the case where the 3686 // backedge-taken count is uint##_max: adding one to it will overflow leading 3687 // to an incorrect trip count of zero. In this (rare) case we will also jump 3688 // to the scalar loop. 3689 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3690 3691 // Generate the code to check any assumptions that we've made for SCEV 3692 // expressions. 3693 emitSCEVChecks(Lp, LoopScalarPreHeader); 3694 3695 // Generate the code that checks in runtime if arrays overlap. We put the 3696 // checks into a separate block to make the more common case of few elements 3697 // faster. 
3698 emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3699 
3700 // Some loops have a single integer induction variable, while other loops
3701 // don't. One example is C++ iterators that often have multiple pointer
3702 // induction variables. In the code below we also support a case where we
3703 // don't have a single induction variable.
3704 //
3705 // We try to obtain an induction variable from the original loop as hard
3706 // as possible. However, if we don't find one that:
3707 // - is an integer
3708 // - counts from zero, stepping by one
3709 // - is the size of the widest induction variable type
3710 // then we create a new one.
3711 OldInduction = Legal->getPrimaryInduction();
3712 Type *IdxTy = Legal->getWidestInductionType();
3713 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3714 // The loop step is equal to the vectorization factor (num of SIMD elements)
3715 // times the unroll factor (num of SIMD instructions).
3716 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3717 Value *Step = createStepForVF(Builder, IdxTy, VF, UF);
3718 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3719 Induction =
3720 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3721 getDebugLocFromInstOrOperands(OldInduction));
3722 
3723 // Emit phis for the new starting index of the scalar loop.
3724 createInductionResumeValues(Lp, CountRoundDown);
3725 
3726 return completeLoopSkeleton(Lp, OrigLoopID);
3727 }
3728 
3729 // Fix up external users of the induction variable. At this point, we are
3730 // in LCSSA form, with all external PHIs that use the IV having one input value,
3731 // coming from the remainder loop. We need those PHIs to also have a correct
3732 // value for the IV when arriving directly from the middle block.
3733 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3734 const InductionDescriptor &II,
3735 Value *CountRoundDown, Value *EndValue,
3736 BasicBlock *MiddleBlock) {
3737 // There are two kinds of external IV usages - those that use the value
3738 // computed in the last iteration (the PHI) and those that use the penultimate
3739 // value (the value that feeds into the phi from the loop latch).
3740 // We allow both, but they, obviously, have different values.
3741 
3742 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3743 
3744 DenseMap<Value *, Value *> MissingVals;
3745 
3746 // An external user of the last iteration's value should see the value that
3747 // the remainder loop uses to initialize its own IV.
3748 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3749 for (User *U : PostInc->users()) {
3750 Instruction *UI = cast<Instruction>(U);
3751 if (!OrigLoop->contains(UI)) {
3752 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3753 MissingVals[UI] = EndValue;
3754 }
3755 }
3756 
3757 // An external user of the penultimate value needs to see EndValue - Step.
3758 // The simplest way to get this is to recompute it from the constituent SCEVs,
3759 // that is Start + (Step * (CRD - 1)).
3760 for (User *U : OrigPhi->users()) {
3761 auto *UI = cast<Instruction>(U);
3762 if (!OrigLoop->contains(UI)) {
3763 const DataLayout &DL =
3764 OrigLoop->getHeader()->getModule()->getDataLayout();
3765 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3766 
3767 IRBuilder<> B(MiddleBlock->getTerminator());
3768 
3769 // Fast-math-flags propagate from the original induction instruction.
3770 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3771 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3772 
3773 Value *CountMinusOne = B.CreateSub(
3774 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3775 Value *CMO =
3776 !II.getStep()->getType()->isIntegerTy()
3777 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3778 II.getStep()->getType())
3779 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3780 CMO->setName("cast.cmo");
3781 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3782 Escape->setName("ind.escape");
3783 MissingVals[UI] = Escape;
3784 }
3785 }
3786 
3787 for (auto &I : MissingVals) {
3788 PHINode *PHI = cast<PHINode>(I.first);
3789 // One corner case we have to handle is two IVs "chasing" each other,
3790 // that is %IV2 = phi [...], [ %IV1, %latch ]
3791 // In this case, if IV1 has an external use, we need to avoid adding both
3792 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3793 // don't already have an incoming value for the middle block.
3794 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3795 PHI->addIncoming(I.second, MiddleBlock);
3796 }
3797 }
3798 
3799 namespace {
3800 
3801 struct CSEDenseMapInfo {
3802 static bool canHandle(const Instruction *I) {
3803 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3804 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3805 }
3806 
3807 static inline Instruction *getEmptyKey() {
3808 return DenseMapInfo<Instruction *>::getEmptyKey();
3809 }
3810 
3811 static inline Instruction *getTombstoneKey() {
3812 return DenseMapInfo<Instruction *>::getTombstoneKey();
3813 }
3814 
3815 static unsigned getHashValue(const Instruction *I) {
3816 assert(canHandle(I) && "Unknown instruction!");
3817 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3818 I->value_op_end()));
3819 }
3820 
3821 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3822 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3823 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3824 return LHS == RHS;
3825 return LHS->isIdenticalTo(RHS);
3826 }
3827 };
3828 
3829 } // end anonymous namespace
3830 
3831 /// Perform CSE of induction variable instructions.
3832 static void cse(BasicBlock *BB) {
3833 // Perform simple CSE.
3834 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3835 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3836 if (!CSEDenseMapInfo::canHandle(&In))
3837 continue;
3838 
3839 // Check if we can replace this instruction with any of the
3840 // visited instructions.
3841 if (Instruction *V = CSEMap.lookup(&In)) {
3842 In.replaceAllUsesWith(V);
3843 In.eraseFromParent();
3844 continue;
3845 }
3846 
3847 CSEMap[&In] = &In;
3848 }
3849 }
3850 
3851 InstructionCost
3852 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3853 bool &NeedToScalarize) const {
3854 Function *F = CI->getCalledFunction();
3855 Type *ScalarRetTy = CI->getType();
3856 SmallVector<Type *, 4> Tys, ScalarTys;
3857 for (auto &ArgOp : CI->args())
3858 ScalarTys.push_back(ArgOp->getType());
3859 
3860 // Estimate the cost of a scalarized vector call. The source operands are
3861 // assumed to be vectors, so we need to extract individual elements from them,
3862 // execute VF scalar calls, and then gather the result into the vector return
3863 // value.
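// Rough worked example (the numbers are invented, not from any target): with
// VF = 4, a scalar call cost of 10 and a scalarization overhead of 8, the
// scalarized estimate computed below is 4 * 10 + 8 = 48; it is kept only if
// no cheaper vectorized variant of the call is available.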
3864 InstructionCost ScalarCallCost = 3865 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3866 if (VF.isScalar()) 3867 return ScalarCallCost; 3868 3869 // Compute corresponding vector type for return value and arguments. 3870 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3871 for (Type *ScalarTy : ScalarTys) 3872 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3873 3874 // Compute costs of unpacking argument values for the scalar calls and 3875 // packing the return values to a vector. 3876 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3877 3878 InstructionCost Cost = 3879 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3880 3881 // If we can't emit a vector call for this function, then the currently found 3882 // cost is the cost we need to return. 3883 NeedToScalarize = true; 3884 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3885 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3886 3887 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3888 return Cost; 3889 3890 // If the corresponding vector cost is cheaper, return its cost. 3891 InstructionCost VectorCallCost = 3892 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3893 if (VectorCallCost < Cost) { 3894 NeedToScalarize = false; 3895 Cost = VectorCallCost; 3896 } 3897 return Cost; 3898 } 3899 3900 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3901 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3902 return Elt; 3903 return VectorType::get(Elt, VF); 3904 } 3905 3906 InstructionCost 3907 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3908 ElementCount VF) const { 3909 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3910 assert(ID && "Expected intrinsic call!"); 3911 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3912 FastMathFlags FMF; 3913 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3914 FMF = FPMO->getFastMathFlags(); 3915 3916 SmallVector<const Value *> Arguments(CI->args()); 3917 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3918 SmallVector<Type *> ParamTys; 3919 std::transform(FTy->param_begin(), FTy->param_end(), 3920 std::back_inserter(ParamTys), 3921 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3922 3923 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3924 dyn_cast<IntrinsicInst>(CI)); 3925 return TTI.getIntrinsicInstrCost(CostAttrs, 3926 TargetTransformInfo::TCK_RecipThroughput); 3927 } 3928 3929 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3930 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3931 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3932 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3933 } 3934 3935 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3936 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3937 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3938 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3939 } 3940 3941 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3942 // For every instruction `I` in MinBWs, truncate the operands, create a 3943 // truncated version of `I` and reextend its result. InstCombine runs 3944 // later and will remove any ext/trunc pairs. 
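// Illustrative sketch (hypothetical IR, assuming MinBWs records that 8 bits
// suffice for %a):
//   %a = add <4 x i32> %x, %y
// becomes roughly
//   %x.tr = trunc <4 x i32> %x to <4 x i8>  ; or reuse a zext's operand
//   %y.tr = trunc <4 x i32> %y to <4 x i8>
//   %a.tr = add <4 x i8> %x.tr, %y.tr
//   %a.ext = zext <4 x i8> %a.tr to <4 x i32>
// and all uses of %a are redirected to %a.ext.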
3945 SmallPtrSet<Value *, 4> Erased; 3946 for (const auto &KV : Cost->getMinimalBitwidths()) { 3947 // If the value wasn't vectorized, we must maintain the original scalar 3948 // type. The absence of the value from State indicates that it 3949 // wasn't vectorized. 3950 // FIXME: Should not rely on getVPValue at this point. 3951 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3952 if (!State.hasAnyVectorValue(Def)) 3953 continue; 3954 for (unsigned Part = 0; Part < UF; ++Part) { 3955 Value *I = State.get(Def, Part); 3956 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3957 continue; 3958 Type *OriginalTy = I->getType(); 3959 Type *ScalarTruncatedTy = 3960 IntegerType::get(OriginalTy->getContext(), KV.second); 3961 auto *TruncatedTy = VectorType::get( 3962 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3963 if (TruncatedTy == OriginalTy) 3964 continue; 3965 3966 IRBuilder<> B(cast<Instruction>(I)); 3967 auto ShrinkOperand = [&](Value *V) -> Value * { 3968 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3969 if (ZI->getSrcTy() == TruncatedTy) 3970 return ZI->getOperand(0); 3971 return B.CreateZExtOrTrunc(V, TruncatedTy); 3972 }; 3973 3974 // The actual instruction modification depends on the instruction type, 3975 // unfortunately. 3976 Value *NewI = nullptr; 3977 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3978 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3979 ShrinkOperand(BO->getOperand(1))); 3980 3981 // Any wrapping introduced by shrinking this operation shouldn't be 3982 // considered undefined behavior. So, we can't unconditionally copy 3983 // arithmetic wrapping flags to NewI. 3984 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3985 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3986 NewI = 3987 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3988 ShrinkOperand(CI->getOperand(1))); 3989 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3990 NewI = B.CreateSelect(SI->getCondition(), 3991 ShrinkOperand(SI->getTrueValue()), 3992 ShrinkOperand(SI->getFalseValue())); 3993 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3994 switch (CI->getOpcode()) { 3995 default: 3996 llvm_unreachable("Unhandled cast!"); 3997 case Instruction::Trunc: 3998 NewI = ShrinkOperand(CI->getOperand(0)); 3999 break; 4000 case Instruction::SExt: 4001 NewI = B.CreateSExtOrTrunc( 4002 CI->getOperand(0), 4003 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4004 break; 4005 case Instruction::ZExt: 4006 NewI = B.CreateZExtOrTrunc( 4007 CI->getOperand(0), 4008 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4009 break; 4010 } 4011 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4012 auto Elements0 = 4013 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4014 auto *O0 = B.CreateZExtOrTrunc( 4015 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4016 auto Elements1 = 4017 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4018 auto *O1 = B.CreateZExtOrTrunc( 4019 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4020 4021 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4022 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4023 // Don't do anything with the operands, just extend the result. 
4024 continue; 4025 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4026 auto Elements = 4027 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4028 auto *O0 = B.CreateZExtOrTrunc( 4029 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4030 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4031 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4032 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4033 auto Elements = 4034 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4035 auto *O0 = B.CreateZExtOrTrunc( 4036 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4037 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4038 } else { 4039 // If we don't know what to do, be conservative and don't do anything. 4040 continue; 4041 } 4042 4043 // Lastly, extend the result. 4044 NewI->takeName(cast<Instruction>(I)); 4045 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4046 I->replaceAllUsesWith(Res); 4047 cast<Instruction>(I)->eraseFromParent(); 4048 Erased.insert(I); 4049 State.reset(Def, Res, Part); 4050 } 4051 } 4052 4053 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4054 for (const auto &KV : Cost->getMinimalBitwidths()) { 4055 // If the value wasn't vectorized, we must maintain the original scalar 4056 // type. The absence of the value from State indicates that it 4057 // wasn't vectorized. 4058 // FIXME: Should not rely on getVPValue at this point. 4059 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4060 if (!State.hasAnyVectorValue(Def)) 4061 continue; 4062 for (unsigned Part = 0; Part < UF; ++Part) { 4063 Value *I = State.get(Def, Part); 4064 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4065 if (Inst && Inst->use_empty()) { 4066 Value *NewI = Inst->getOperand(0); 4067 Inst->eraseFromParent(); 4068 State.reset(Def, NewI, Part); 4069 } 4070 } 4071 } 4072 } 4073 4074 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4075 // Insert truncates and extends for any truncated instructions as hints to 4076 // InstCombine. 4077 if (VF.isVector()) 4078 truncateToMinimalBitwidths(State); 4079 4080 // Fix widened non-induction PHIs by setting up the PHI operands. 4081 if (OrigPHIsToFix.size()) { 4082 assert(EnableVPlanNativePath && 4083 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4084 fixNonInductionPHIs(State); 4085 } 4086 4087 // At this point every instruction in the original loop is widened to a 4088 // vector form. Now we need to fix the recurrences in the loop. These PHI 4089 // nodes are currently empty because we did not want to introduce cycles. 4090 // This is the second stage of vectorizing recurrences. 4091 fixCrossIterationPHIs(State); 4092 4093 // Forget the original basic block. 4094 PSE.getSE()->forgetLoop(OrigLoop); 4095 4096 // If we inserted an edge from the middle block to the unique exit block, 4097 // update uses outside the loop (phis) to account for the newly inserted 4098 // edge. 4099 if (!Cost->requiresScalarEpilogue(VF)) { 4100 // Fix-up external users of the induction variables. 4101 for (auto &Entry : Legal->getInductionVars()) 4102 fixupIVUsers(Entry.first, Entry.second, 4103 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4104 IVEndValues[Entry.first], LoopMiddleBlock); 4105 4106 fixLCSSAPHIs(State); 4107 } 4108 4109 for (Instruction *PI : PredicatedInstructions) 4110 sinkScalarOperands(&*PI); 4111 4112 // Remove redundant induction instructions. 
  cse(LoopVectorBody);

  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(
      LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
      LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes. At this point every instruction in the
  // original loop is widened to a vector form so we can use them to construct
  // the incoming edges.
  VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
                                                  VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
4176 // 4177 // vector.ph: 4178 // v_init = vector(..., ..., ..., a[-1]) 4179 // br vector.body 4180 // 4181 // vector.body 4182 // i = phi [0, vector.ph], [i+4, vector.body] 4183 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4184 // v2 = a[i, i+1, i+2, i+3]; 4185 // v3 = vector(v1(3), v2(0, 1, 2)) 4186 // b[i, i+1, i+2, i+3] = v2 - v3 4187 // br cond, vector.body, middle.block 4188 // 4189 // middle.block: 4190 // x = v2(3) 4191 // br scalar.ph 4192 // 4193 // scalar.ph: 4194 // s_init = phi [x, middle.block], [a[-1], otherwise] 4195 // br scalar.body 4196 // 4197 // After execution completes the vector loop, we extract the next value of 4198 // the recurrence (x) to use as the initial value in the scalar loop. 4199 4200 // Extract the last vector element in the middle block. This will be the 4201 // initial value for the recurrence when jumping to the scalar loop. 4202 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4203 Value *Incoming = State.get(PreviousDef, UF - 1); 4204 auto *ExtractForScalar = Incoming; 4205 auto *IdxTy = Builder.getInt32Ty(); 4206 if (VF.isVector()) { 4207 auto *One = ConstantInt::get(IdxTy, 1); 4208 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4209 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4210 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4211 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4212 "vector.recur.extract"); 4213 } 4214 // Extract the second last element in the middle block if the 4215 // Phi is used outside the loop. We need to extract the phi itself 4216 // and not the last element (the phi update in the current iteration). This 4217 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4218 // when the scalar loop is not run at all. 4219 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4220 if (VF.isVector()) { 4221 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4222 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4223 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4224 Incoming, Idx, "vector.recur.extract.for.phi"); 4225 } else if (UF > 1) 4226 // When loop is unrolled without vectorizing, initialize 4227 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4228 // of `Incoming`. This is analogous to the vectorized case above: extracting 4229 // the second last element when VF > 1. 4230 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4231 4232 // Fix the initial value of the original recurrence in the scalar loop. 4233 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4234 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 4235 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4236 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 4237 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4238 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4239 Start->addIncoming(Incoming, BB); 4240 } 4241 4242 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4243 Phi->setName("scalar.recur"); 4244 4245 // Finally, fix users of the recurrence outside the loop. The users will need 4246 // either the last value of the scalar recurrence or the last value of the 4247 // vector recurrence we extracted in the middle block. Since the loop is in 4248 // LCSSA form, we just need to find all the phi nodes for the original scalar 4249 // recurrence in the exit block, and then add an edge for the middle block. 
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from the middle block to
  // the exit block, and thus no phis need to be updated.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(LoopExitInst);

  Type *PhiTy = OrigPhi->getType();
  // If the tail is folded by masking, the vector value to leave the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, instead of the former. For an inloop reduction the
  // reduction will already be predicated, and does not need to be handled
  // here.
  if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at no
      // extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
4312 if (PreferPredicatedReductionSelect || 4313 TTI->preferPredicatedReductionSelect( 4314 RdxDesc.getOpcode(), PhiTy, 4315 TargetTransformInfo::ReductionFlags())) { 4316 auto *VecRdxPhi = 4317 cast<PHINode>(State.get(PhiR, Part)); 4318 VecRdxPhi->setIncomingValueForBlock( 4319 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4320 } 4321 } 4322 } 4323 4324 // If the vector reduction can be performed in a smaller type, we truncate 4325 // then extend the loop exit value to enable InstCombine to evaluate the 4326 // entire expression in the smaller type. 4327 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4328 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4329 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4330 Builder.SetInsertPoint( 4331 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4332 VectorParts RdxParts(UF); 4333 for (unsigned Part = 0; Part < UF; ++Part) { 4334 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4335 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4336 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4337 : Builder.CreateZExt(Trunc, VecTy); 4338 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4339 if (U != Trunc) { 4340 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4341 RdxParts[Part] = Extnd; 4342 } 4343 } 4344 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4345 for (unsigned Part = 0; Part < UF; ++Part) { 4346 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4347 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4348 } 4349 } 4350 4351 // Reduce all of the unrolled parts into a single vector. 4352 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4353 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4354 4355 // The middle block terminator has already been assigned a DebugLoc here (the 4356 // OrigLoop's single latch terminator). We want the whole middle block to 4357 // appear to execute on this line because: (a) it is all compiler generated, 4358 // (b) these instructions are always executed after evaluating the latch 4359 // conditional branch, and (c) other passes may add new predecessors which 4360 // terminate on this line. This is the easiest way to ensure we don't 4361 // accidentally cause an extra step back into the loop while debugging. 4362 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4363 if (PhiR->isOrdered()) 4364 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4365 else { 4366 // Floating-point operations should have some FMF to enable the reduction. 4367 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4368 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4369 for (unsigned Part = 1; Part < UF; ++Part) { 4370 Value *RdxPart = State.get(LoopExitInstDef, Part); 4371 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4372 ReducedPartRdx = Builder.CreateBinOp( 4373 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4374 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 4375 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 4376 ReducedPartRdx, RdxPart); 4377 else 4378 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4379 } 4380 } 4381 4382 // Create the reduction after the loop. Note that inloop reductions create the 4383 // target reduction in the loop using a Reduction recipe. 
  if (VF.isVector() && !PhiR->isInLoop()) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());
  for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
    BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
        LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RK != RecurKind::Add && RK != RecurKind::Mul)
    return;

  Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
  assert(LoopExitInstr && "null loop exit instruction");
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(LoopExitInstr);
  Visited.insert(LoopExitInstr);

  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();
    if (isa<OverflowingBinaryOperator>(Cur))
      for (unsigned Part = 0; Part < UF; ++Part) {
        // FIXME: Should not rely on getVPValue at this point.
        Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
        cast<Instruction>(V)->dropPoisonGeneratingFlags();
      }

    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);
      if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
          Visited.insert(UI).second)
        Worklist.push_back(UI);
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
      // Some phis were already updated by the reduction and recurrence code
      // above; leave them alone.
      continue;

    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // Non-instruction incoming values will have only one value.

    VPLane Lane = VPLane::getFirstLane();
    if (isa<Instruction>(IncomingValue) &&
        !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
                                           VF))
      Lane = VPLane::getLastLaneForVF(VF);

    // Can be a loop invariant incoming value or the last scalar value to be
    // extracted from the vectorized loop.
    // FIXME: Should not rely on getVPValue at this point.
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Value *lastIncomingValue =
        OrigLoop->isLoopInvariant(IncomingValue)
            ? IncomingValue
            : State.get(State.Plan->getVPValue(IncomingValue, true),
                        VPIteration(UF - 1, Lane));
    LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are added to the worklist. The algorithm ends when a full pass
  // over the worklist does not sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is not in the loop,
      // or may have side effects.
      if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
          I->mayHaveSideEffects())
        continue;

      // If the instruction is already in PredBB, check if we can sink its
      // operands. In that case, VPlan's sinkScalarOperands() succeeded in
      // sinking the scalar instruction I, hence it appears in PredBB; but it
      // may have failed to sink I's operands (recursively), which we try
      // (again) here.
      if (I->getParent() == PredBB) {
        Worklist.insert(I->op_begin(), I->op_end());
        continue;
      }

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
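      // For example (illustrative): when a predicated store was scalarized,
      // the address computation feeding it may only be used inside the
      // predicated block; such a GEP is moved into the block here, and its
      // own operands are then pushed onto the worklist for the next pass.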
4542 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4543 InstsToReanalyze.push_back(I); 4544 continue; 4545 } 4546 4547 // Move the instruction to the beginning of the predicated block, and add 4548 // it's operands to the worklist. 4549 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4550 Worklist.insert(I->op_begin(), I->op_end()); 4551 4552 // The sinking may have enabled other instructions to be sunk, so we will 4553 // need to iterate. 4554 Changed = true; 4555 } 4556 } while (Changed); 4557 } 4558 4559 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4560 for (PHINode *OrigPhi : OrigPHIsToFix) { 4561 VPWidenPHIRecipe *VPPhi = 4562 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4563 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4564 // Make sure the builder has a valid insert point. 4565 Builder.SetInsertPoint(NewPhi); 4566 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4567 VPValue *Inc = VPPhi->getIncomingValue(i); 4568 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4569 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4570 } 4571 } 4572 } 4573 4574 bool InnerLoopVectorizer::useOrderedReductions( 4575 const RecurrenceDescriptor &RdxDesc) { 4576 return Cost->useOrderedReductions(RdxDesc); 4577 } 4578 4579 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4580 VPWidenPHIRecipe *PhiR, 4581 VPTransformState &State) { 4582 PHINode *P = cast<PHINode>(PN); 4583 if (EnableVPlanNativePath) { 4584 // Currently we enter here in the VPlan-native path for non-induction 4585 // PHIs where all control flow is uniform. We simply widen these PHIs. 4586 // Create a vector phi with no operands - the vector phi operands will be 4587 // set at the end of vector code generation. 4588 Type *VecTy = (State.VF.isScalar()) 4589 ? PN->getType() 4590 : VectorType::get(PN->getType(), State.VF); 4591 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4592 State.set(PhiR, VecPhi, 0); 4593 OrigPHIsToFix.push_back(P); 4594 4595 return; 4596 } 4597 4598 assert(PN->getParent() == OrigLoop->getHeader() && 4599 "Non-header phis should have been handled elsewhere"); 4600 4601 // In order to support recurrences we need to be able to vectorize Phi nodes. 4602 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4603 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4604 // this value when we vectorize all of the instructions that use the PHI. 4605 4606 assert(!Legal->isReductionVariable(P) && 4607 "reductions should be handled elsewhere"); 4608 4609 setDebugLocFromInst(P); 4610 4611 // This PHINode must be an induction variable. 4612 // Make sure that we know about it. 4613 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4614 4615 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4616 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4617 4618 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4619 // which can be found from the original scalar operations. 4620 switch (II.getKind()) { 4621 case InductionDescriptor::IK_NoInduction: 4622 llvm_unreachable("Unknown induction"); 4623 case InductionDescriptor::IK_IntInduction: 4624 case InductionDescriptor::IK_FpInduction: 4625 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4626 case InductionDescriptor::IK_PtrInduction: { 4627 // Handle the pointer induction variable case. 
4628 assert(P->getType()->isPointerTy() && "Unexpected type."); 4629 4630 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4631 // This is the normalized GEP that starts counting at zero. 4632 Value *PtrInd = 4633 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4634 // Determine the number of scalars we need to generate for each unroll 4635 // iteration. If the instruction is uniform, we only need to generate the 4636 // first lane. Otherwise, we generate all VF values. 4637 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4638 assert((IsUniform || !State.VF.isScalable()) && 4639 "Cannot scalarize a scalable VF"); 4640 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 4641 4642 for (unsigned Part = 0; Part < UF; ++Part) { 4643 Value *PartStart = 4644 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4645 4646 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4647 Value *Idx = Builder.CreateAdd( 4648 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4649 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4650 Value *SclrGep = 4651 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4652 SclrGep->setName("next.gep"); 4653 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4654 } 4655 } 4656 return; 4657 } 4658 assert(isa<SCEVConstant>(II.getStep()) && 4659 "Induction step not a SCEV constant!"); 4660 Type *PhiType = II.getStep()->getType(); 4661 4662 // Build a pointer phi 4663 Value *ScalarStartValue = II.getStartValue(); 4664 Type *ScStValueType = ScalarStartValue->getType(); 4665 PHINode *NewPointerPhi = 4666 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4667 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4668 4669 // A pointer induction, performed by using a gep 4670 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4671 Instruction *InductionLoc = LoopLatch->getTerminator(); 4672 const SCEV *ScalarStep = II.getStep(); 4673 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4674 Value *ScalarStepValue = 4675 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4676 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4677 Value *NumUnrolledElems = 4678 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4679 Value *InductionGEP = GetElementPtrInst::Create( 4680 II.getElementType(), NewPointerPhi, 4681 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4682 InductionLoc); 4683 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4684 4685 // Create UF many actual address geps that use the pointer 4686 // phi as base and a vectorized version of the step value 4687 // (<step*0, ..., step*N>) as offset. 4688 for (unsigned Part = 0; Part < State.UF; ++Part) { 4689 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4690 Value *StartOffsetScalar = 4691 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4692 Value *StartOffset = 4693 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4694 // Create a vector of consecutive numbers from zero to VF. 
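      // Worked example (illustrative, not in the original source): for a
      // fixed VF of 4, Part == 1 and a unit step, StartOffset starts as the
      // splat <4, 4, 4, 4>; adding the step vector <0, 1, 2, 3> below yields
      // <4, 5, 6, 7>, so the GEP for this part addresses elements 4..7
      // relative to the pointer phi.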
4695 StartOffset = 4696 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4697 4698 Value *GEP = Builder.CreateGEP( 4699 II.getElementType(), NewPointerPhi, 4700 Builder.CreateMul( 4701 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4702 "vector.gep")); 4703 State.set(PhiR, GEP, Part); 4704 } 4705 } 4706 } 4707 } 4708 4709 /// A helper function for checking whether an integer division-related 4710 /// instruction may divide by zero (in which case it must be predicated if 4711 /// executed conditionally in the scalar code). 4712 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4713 /// Non-zero divisors that are non compile-time constants will not be 4714 /// converted into multiplication, so we will still end up scalarizing 4715 /// the division, but can do so w/o predication. 4716 static bool mayDivideByZero(Instruction &I) { 4717 assert((I.getOpcode() == Instruction::UDiv || 4718 I.getOpcode() == Instruction::SDiv || 4719 I.getOpcode() == Instruction::URem || 4720 I.getOpcode() == Instruction::SRem) && 4721 "Unexpected instruction"); 4722 Value *Divisor = I.getOperand(1); 4723 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4724 return !CInt || CInt->isZero(); 4725 } 4726 4727 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4728 VPUser &ArgOperands, 4729 VPTransformState &State) { 4730 assert(!isa<DbgInfoIntrinsic>(I) && 4731 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4732 setDebugLocFromInst(&I); 4733 4734 Module *M = I.getParent()->getParent()->getParent(); 4735 auto *CI = cast<CallInst>(&I); 4736 4737 SmallVector<Type *, 4> Tys; 4738 for (Value *ArgOperand : CI->args()) 4739 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4740 4741 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4742 4743 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4744 // version of the instruction. 4745 // Is it beneficial to perform intrinsic call compared to lib call? 4746 bool NeedToScalarize = false; 4747 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4748 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4749 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4750 assert((UseVectorIntrinsic || !NeedToScalarize) && 4751 "Instruction should be scalarized elsewhere."); 4752 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4753 "Either the intrinsic cost or vector call cost must be valid"); 4754 4755 for (unsigned Part = 0; Part < UF; ++Part) { 4756 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4757 SmallVector<Value *, 4> Args; 4758 for (auto &I : enumerate(ArgOperands.operands())) { 4759 // Some intrinsics have a scalar argument - don't replace it with a 4760 // vector. 4761 Value *Arg; 4762 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4763 Arg = State.get(I.value(), Part); 4764 else { 4765 Arg = State.get(I.value(), VPIteration(0, 0)); 4766 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4767 TysForDecl.push_back(Arg->getType()); 4768 } 4769 Args.push_back(Arg); 4770 } 4771 4772 Function *VectorF; 4773 if (UseVectorIntrinsic) { 4774 // Use vector version of the intrinsic. 
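      // For instance (illustrative): a scalar call to llvm.sqrt.f32 in the
      // original loop is re-declared with a widened overload such as
      // llvm.sqrt.v4f32 for VF = 4, and the widened arguments collected above
      // are then passed to that declaration by the CreateCall below.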
4775 if (VF.isVector()) 4776 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4777 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4778 assert(VectorF && "Can't retrieve vector intrinsic."); 4779 } else { 4780 // Use vector version of the function call. 4781 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4782 #ifndef NDEBUG 4783 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4784 "Can't create vector function."); 4785 #endif 4786 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4787 } 4788 SmallVector<OperandBundleDef, 1> OpBundles; 4789 CI->getOperandBundlesAsDefs(OpBundles); 4790 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4791 4792 if (isa<FPMathOperator>(V)) 4793 V->copyFastMathFlags(CI); 4794 4795 State.set(Def, V, Part); 4796 addMetadata(V, &I); 4797 } 4798 } 4799 4800 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4801 // We should not collect Scalars more than once per VF. Right now, this 4802 // function is called from collectUniformsAndScalars(), which already does 4803 // this check. Collecting Scalars for VF=1 does not make any sense. 4804 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4805 "This function should not be visited twice for the same VF"); 4806 4807 SmallSetVector<Instruction *, 8> Worklist; 4808 4809 // These sets are used to seed the analysis with pointers used by memory 4810 // accesses that will remain scalar. 4811 SmallSetVector<Instruction *, 8> ScalarPtrs; 4812 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4813 auto *Latch = TheLoop->getLoopLatch(); 4814 4815 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4816 // The pointer operands of loads and stores will be scalar as long as the 4817 // memory access is not a gather or scatter operation. The value operand of a 4818 // store will remain scalar if the store is scalarized. 4819 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4820 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4821 assert(WideningDecision != CM_Unknown && 4822 "Widening decision should be ready at this moment"); 4823 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4824 if (Ptr == Store->getValueOperand()) 4825 return WideningDecision == CM_Scalarize; 4826 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4827 "Ptr is neither a value or pointer operand"); 4828 return WideningDecision != CM_GatherScatter; 4829 }; 4830 4831 // A helper that returns true if the given value is a bitcast or 4832 // getelementptr instruction contained in the loop. 4833 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4834 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4835 isa<GetElementPtrInst>(V)) && 4836 !TheLoop->isLoopInvariant(V); 4837 }; 4838 4839 // A helper that evaluates a memory access's use of a pointer. If the use will 4840 // be a scalar use and the pointer is only used by memory accesses, we place 4841 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4842 // PossibleNonScalarPtrs. 4843 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4844 // We only care about bitcast and getelementptr instructions contained in 4845 // the loop. 4846 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4847 return; 4848 4849 // If the pointer has already been identified as scalar (e.g., if it was 4850 // also identified as uniform), there's nothing to do. 
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use, and (3) the scalars the cost model has forced
  // (inserted further below).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // (3) Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If tail-folding is applied, the primary induction variable will be used
    // to feed a vector compare.
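    // Sketch (illustrative, assuming the usual tail-folding lowering): the
    // folded tail guards the loop body with a mask along the lines of
    //   icmp ule <widened IV>, splat(trip count - 1)
    // so the widened form of the primary induction is needed and the phi must
    // not be marked scalar here.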
4932 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4933 continue; 4934 4935 // Returns true if \p Indvar is a pointer induction that is used directly by 4936 // load/store instruction \p I. 4937 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4938 Instruction *I) { 4939 return Induction.second.getKind() == 4940 InductionDescriptor::IK_PtrInduction && 4941 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4942 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4943 }; 4944 4945 // Determine if all users of the induction variable are scalar after 4946 // vectorization. 4947 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4948 auto *I = cast<Instruction>(U); 4949 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4950 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4951 }); 4952 if (!ScalarInd) 4953 continue; 4954 4955 // Determine if all users of the induction variable update instruction are 4956 // scalar after vectorization. 4957 auto ScalarIndUpdate = 4958 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4959 auto *I = cast<Instruction>(U); 4960 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4961 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4962 }); 4963 if (!ScalarIndUpdate) 4964 continue; 4965 4966 // The induction variable and its update instruction will remain scalar. 4967 Worklist.insert(Ind); 4968 Worklist.insert(IndUpdate); 4969 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4970 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4971 << "\n"); 4972 } 4973 4974 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4975 } 4976 4977 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 4978 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4979 return false; 4980 switch(I->getOpcode()) { 4981 default: 4982 break; 4983 case Instruction::Load: 4984 case Instruction::Store: { 4985 if (!Legal->isMaskRequired(I)) 4986 return false; 4987 auto *Ptr = getLoadStorePointerOperand(I); 4988 auto *Ty = getLoadStoreType(I); 4989 const Align Alignment = getLoadStoreAlignment(I); 4990 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4991 TTI.isLegalMaskedGather(Ty, Alignment)) 4992 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4993 TTI.isLegalMaskedScatter(Ty, Alignment)); 4994 } 4995 case Instruction::UDiv: 4996 case Instruction::SDiv: 4997 case Instruction::SRem: 4998 case Instruction::URem: 4999 return mayDivideByZero(*I); 5000 } 5001 return false; 5002 } 5003 5004 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5005 Instruction *I, ElementCount VF) { 5006 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5007 assert(getWideningDecision(I, VF) == CM_Unknown && 5008 "Decision should not be set yet."); 5009 auto *Group = getInterleavedAccessGroup(I); 5010 assert(Group && "Must have a group."); 5011 5012 // If the instruction's allocated size doesn't equal it's type size, it 5013 // requires padding and will be scalarized. 5014 auto &DL = I->getModule()->getDataLayout(); 5015 auto *ScalarTy = getLoadStoreType(I); 5016 if (hasIrregularType(ScalarTy, DL)) 5017 return false; 5018 5019 // Check if masking is required. 
5020 // A Group may need masking for one of two reasons: it resides in a block that 5021 // needs predication, or it was decided to use masking to deal with gaps 5022 // (either a gap at the end of a load-access that may result in a speculative 5023 // load, or any gaps in a store-access). 5024 bool PredicatedAccessRequiresMasking = 5025 blockNeedsPredicationForAnyReason(I->getParent()) && 5026 Legal->isMaskRequired(I); 5027 bool LoadAccessWithGapsRequiresEpilogMasking = 5028 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 5029 !isScalarEpilogueAllowed(); 5030 bool StoreAccessWithGapsRequiresMasking = 5031 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 5032 if (!PredicatedAccessRequiresMasking && 5033 !LoadAccessWithGapsRequiresEpilogMasking && 5034 !StoreAccessWithGapsRequiresMasking) 5035 return true; 5036 5037 // If masked interleaving is required, we expect that the user/target had 5038 // enabled it, because otherwise it either wouldn't have been created or 5039 // it should have been invalidated by the CostModel. 5040 assert(useMaskedInterleavedAccesses(TTI) && 5041 "Masked interleave-groups for predicated accesses are not enabled."); 5042 5043 if (Group->isReverse()) 5044 return false; 5045 5046 auto *Ty = getLoadStoreType(I); 5047 const Align Alignment = getLoadStoreAlignment(I); 5048 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5049 : TTI.isLegalMaskedStore(Ty, Alignment); 5050 } 5051 5052 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5053 Instruction *I, ElementCount VF) { 5054 // Get and ensure we have a valid memory instruction. 5055 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 5056 5057 auto *Ptr = getLoadStorePointerOperand(I); 5058 auto *ScalarTy = getLoadStoreType(I); 5059 5060 // In order to be widened, the pointer should be consecutive, first of all. 5061 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 5062 return false; 5063 5064 // If the instruction is a store located in a predicated block, it will be 5065 // scalarized. 5066 if (isScalarWithPredication(I)) 5067 return false; 5068 5069 // If the instruction's allocated size doesn't equal it's type size, it 5070 // requires padding and will be scalarized. 5071 auto &DL = I->getModule()->getDataLayout(); 5072 if (hasIrregularType(ScalarTy, DL)) 5073 return false; 5074 5075 return true; 5076 } 5077 5078 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5079 // We should not collect Uniforms more than once per VF. Right now, 5080 // this function is called from collectUniformsAndScalars(), which 5081 // already does this check. Collecting Uniforms for VF=1 does not make any 5082 // sense. 5083 5084 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5085 "This function should not be visited twice for the same VF"); 5086 5087 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5088 // not analyze again. Uniforms.count(VF) will return 1. 5089 Uniforms[VF].clear(); 5090 5091 // We now know that the loop is vectorizable! 5092 // Collect instructions inside the loop that will remain uniform after 5093 // vectorization. 5094 5095 // Global values, params and instructions outside of current loop are out of 5096 // scope. 5097 auto isOutOfScope = [&](Value *V) -> bool { 5098 Instruction *I = dyn_cast<Instruction>(V); 5099 return (!I || !TheLoop->contains(I)); 5100 }; 5101 5102 // Worklist containing uniform instructions demanding lane 0. 
5103 SetVector<Instruction *> Worklist; 5104 BasicBlock *Latch = TheLoop->getLoopLatch(); 5105 5106 // Add uniform instructions demanding lane 0 to the worklist. Instructions 5107 // that are scalar with predication must not be considered uniform after 5108 // vectorization, because that would create an erroneous replicating region 5109 // where only a single instance out of VF should be formed. 5110 // TODO: optimize such seldom cases if found important, see PR40816. 5111 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5112 if (isOutOfScope(I)) { 5113 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5114 << *I << "\n"); 5115 return; 5116 } 5117 if (isScalarWithPredication(I)) { 5118 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5119 << *I << "\n"); 5120 return; 5121 } 5122 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5123 Worklist.insert(I); 5124 }; 5125 5126 // Start with the conditional branch. If the branch condition is an 5127 // instruction contained in the loop that is only used by the branch, it is 5128 // uniform. 5129 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5130 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5131 addToWorklistIfAllowed(Cmp); 5132 5133 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5134 InstWidening WideningDecision = getWideningDecision(I, VF); 5135 assert(WideningDecision != CM_Unknown && 5136 "Widening decision should be ready at this moment"); 5137 5138 // A uniform memory op is itself uniform. We exclude uniform stores 5139 // here as they demand the last lane, not the first one. 5140 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5141 assert(WideningDecision == CM_Scalarize); 5142 return true; 5143 } 5144 5145 return (WideningDecision == CM_Widen || 5146 WideningDecision == CM_Widen_Reverse || 5147 WideningDecision == CM_Interleave); 5148 }; 5149 5150 5151 // Returns true if Ptr is the pointer operand of a memory access instruction 5152 // I, and I is known to not require scalarization. 5153 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5154 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5155 }; 5156 5157 // Holds a list of values which are known to have at least one uniform use. 5158 // Note that there may be other uses which aren't uniform. A "uniform use" 5159 // here is something which only demands lane 0 of the unrolled iterations; 5160 // it does not imply that all lanes produce the same value (e.g. this is not 5161 // the usual meaning of uniform) 5162 SetVector<Value *> HasUniformUse; 5163 5164 // Scan the loop for instructions which are either a) known to have only 5165 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5166 for (auto *BB : TheLoop->blocks()) 5167 for (auto &I : *BB) { 5168 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5169 switch (II->getIntrinsicID()) { 5170 case Intrinsic::sideeffect: 5171 case Intrinsic::experimental_noalias_scope_decl: 5172 case Intrinsic::assume: 5173 case Intrinsic::lifetime_start: 5174 case Intrinsic::lifetime_end: 5175 if (TheLoop->hasLoopInvariantOperands(&I)) 5176 addToWorklistIfAllowed(&I); 5177 break; 5178 default: 5179 break; 5180 } 5181 } 5182 5183 // ExtractValue instructions must be uniform, because the operands are 5184 // known to be loop-invariant. 
5185 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5186 assert(isOutOfScope(EVI->getAggregateOperand()) && 5187 "Expected aggregate value to be loop invariant"); 5188 addToWorklistIfAllowed(EVI); 5189 continue; 5190 } 5191 5192 // If there's no pointer operand, there's nothing to do. 5193 auto *Ptr = getLoadStorePointerOperand(&I); 5194 if (!Ptr) 5195 continue; 5196 5197 // A uniform memory op is itself uniform. We exclude uniform stores 5198 // here as they demand the last lane, not the first one. 5199 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5200 addToWorklistIfAllowed(&I); 5201 5202 if (isUniformDecision(&I, VF)) { 5203 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5204 HasUniformUse.insert(Ptr); 5205 } 5206 } 5207 5208 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5209 // demanding) users. Since loops are assumed to be in LCSSA form, this 5210 // disallows uses outside the loop as well. 5211 for (auto *V : HasUniformUse) { 5212 if (isOutOfScope(V)) 5213 continue; 5214 auto *I = cast<Instruction>(V); 5215 auto UsersAreMemAccesses = 5216 llvm::all_of(I->users(), [&](User *U) -> bool { 5217 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5218 }); 5219 if (UsersAreMemAccesses) 5220 addToWorklistIfAllowed(I); 5221 } 5222 5223 // Expand Worklist in topological order: whenever a new instruction 5224 // is added , its users should be already inside Worklist. It ensures 5225 // a uniform instruction will only be used by uniform instructions. 5226 unsigned idx = 0; 5227 while (idx != Worklist.size()) { 5228 Instruction *I = Worklist[idx++]; 5229 5230 for (auto OV : I->operand_values()) { 5231 // isOutOfScope operands cannot be uniform instructions. 5232 if (isOutOfScope(OV)) 5233 continue; 5234 // First order recurrence Phi's should typically be considered 5235 // non-uniform. 5236 auto *OP = dyn_cast<PHINode>(OV); 5237 if (OP && Legal->isFirstOrderRecurrence(OP)) 5238 continue; 5239 // If all the users of the operand are uniform, then add the 5240 // operand into the uniform worklist. 5241 auto *OI = cast<Instruction>(OV); 5242 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5243 auto *J = cast<Instruction>(U); 5244 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5245 })) 5246 addToWorklistIfAllowed(OI); 5247 } 5248 } 5249 5250 // For an instruction to be added into Worklist above, all its users inside 5251 // the loop should also be in Worklist. However, this condition cannot be 5252 // true for phi nodes that form a cyclic dependence. We must process phi 5253 // nodes separately. An induction variable will remain uniform if all users 5254 // of the induction variable and induction variable update remain uniform. 5255 // The code below handles both pointer and non-pointer induction variables. 5256 for (auto &Induction : Legal->getInductionVars()) { 5257 auto *Ind = Induction.first; 5258 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5259 5260 // Determine if all users of the induction variable are uniform after 5261 // vectorization. 5262 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5263 auto *I = cast<Instruction>(U); 5264 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5265 isVectorizedMemAccessUse(I, Ind); 5266 }); 5267 if (!UniformInd) 5268 continue; 5269 5270 // Determine if all users of the induction variable update instruction are 5271 // uniform after vectorization. 
5272 auto UniformIndUpdate = 5273 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5274 auto *I = cast<Instruction>(U); 5275 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5276 isVectorizedMemAccessUse(I, IndUpdate); 5277 }); 5278 if (!UniformIndUpdate) 5279 continue; 5280 5281 // The induction variable and its update instruction will remain uniform. 5282 addToWorklistIfAllowed(Ind); 5283 addToWorklistIfAllowed(IndUpdate); 5284 } 5285 5286 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5287 } 5288 5289 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5290 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5291 5292 if (Legal->getRuntimePointerChecking()->Need) { 5293 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5294 "runtime pointer checks needed. Enable vectorization of this " 5295 "loop with '#pragma clang loop vectorize(enable)' when " 5296 "compiling with -Os/-Oz", 5297 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5298 return true; 5299 } 5300 5301 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5302 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5303 "runtime SCEV checks needed. Enable vectorization of this " 5304 "loop with '#pragma clang loop vectorize(enable)' when " 5305 "compiling with -Os/-Oz", 5306 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5307 return true; 5308 } 5309 5310 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5311 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5312 reportVectorizationFailure("Runtime stride check for small trip count", 5313 "runtime stride == 1 checks needed. Enable vectorization of " 5314 "this loop without such check by compiling with -Os/-Oz", 5315 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5316 return true; 5317 } 5318 5319 return false; 5320 } 5321 5322 ElementCount 5323 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5324 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5325 return ElementCount::getScalable(0); 5326 5327 if (Hints->isScalableVectorizationDisabled()) { 5328 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5329 "ScalableVectorizationDisabled", ORE, TheLoop); 5330 return ElementCount::getScalable(0); 5331 } 5332 5333 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5334 5335 auto MaxScalableVF = ElementCount::getScalable( 5336 std::numeric_limits<ElementCount::ScalarTy>::max()); 5337 5338 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5339 // FIXME: While for scalable vectors this is currently sufficient, this should 5340 // be replaced by a more detailed mechanism that filters out specific VFs, 5341 // instead of invalidating vectorization for a whole set of VFs based on the 5342 // MaxVF. 5343 5344 // Disable scalable vectorization if the loop contains unsupported reductions. 5345 if (!canVectorizeReductions(MaxScalableVF)) { 5346 reportVectorizationInfo( 5347 "Scalable vectorization not supported for the reduction " 5348 "operations found in this loop.", 5349 "ScalableVFUnfeasible", ORE, TheLoop); 5350 return ElementCount::getScalable(0); 5351 } 5352 5353 // Disable scalable vectorization if the loop contains any instructions 5354 // with element types not supported for scalable vectors. 
5355 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5356 return !Ty->isVoidTy() && 5357 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5358 })) { 5359 reportVectorizationInfo("Scalable vectorization is not supported " 5360 "for all element types found in this loop.", 5361 "ScalableVFUnfeasible", ORE, TheLoop); 5362 return ElementCount::getScalable(0); 5363 } 5364 5365 if (Legal->isSafeForAnyVectorWidth()) 5366 return MaxScalableVF; 5367 5368 // Limit MaxScalableVF by the maximum safe dependence distance. 5369 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5370 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 5371 MaxVScale = 5372 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 5373 MaxScalableVF = ElementCount::getScalable( 5374 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5375 if (!MaxScalableVF) 5376 reportVectorizationInfo( 5377 "Max legal vector width too small, scalable vectorization " 5378 "unfeasible.", 5379 "ScalableVFUnfeasible", ORE, TheLoop); 5380 5381 return MaxScalableVF; 5382 } 5383 5384 FixedScalableVFPair 5385 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5386 ElementCount UserVF) { 5387 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5388 unsigned SmallestType, WidestType; 5389 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5390 5391 // Get the maximum safe dependence distance in bits computed by LAA. 5392 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5393 // the memory accesses that is most restrictive (involved in the smallest 5394 // dependence distance). 5395 unsigned MaxSafeElements = 5396 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5397 5398 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5399 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5400 5401 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5402 << ".\n"); 5403 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5404 << ".\n"); 5405 5406 // First analyze the UserVF, fall back if the UserVF should be ignored. 5407 if (UserVF) { 5408 auto MaxSafeUserVF = 5409 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5410 5411 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5412 // If `VF=vscale x N` is safe, then so is `VF=N` 5413 if (UserVF.isScalable()) 5414 return FixedScalableVFPair( 5415 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5416 else 5417 return UserVF; 5418 } 5419 5420 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5421 5422 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5423 // is better to ignore the hint and let the compiler choose a suitable VF. 
    if (!UserVF.isScalable()) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe, clamping to max safe VF="
                        << MaxSafeFixedVF << ".\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe, clamping to maximum safe vectorization factor "
               << ore::NV("VectorizationFactor", MaxSafeFixedVF);
      });
      return MaxSafeFixedVF;
    }

    if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is ignored because scalable vectors are not "
                           "available.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is ignored because the target does not support scalable "
                  "vectors. The compiler will pick a more suitable value.";
      });
    } else {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe. Ignoring scalable UserVF.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe. Ignoring the hint to let the compiler pick a "
                  "more suitable value.";
      });
    }
  }

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");

  FixedScalableVFPair Result(ElementCount::getFixed(1),
                             ElementCount::getScalable(0));
  if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
                                           WidestType, MaxSafeFixedVF))
    Result.FixedVF = MaxVF;

  if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
                                           WidestType, MaxSafeScalableVF))
    if (MaxVF.isScalable()) {
      Result.ScalableVF = MaxVF;
      LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
                        << "\n");
    }

  return Result;
}

FixedScalableVFPair
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this since it's still likely to be
    // dynamically uniform if the target can skip.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
Not enabled for divergent target",
5496 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5497 return FixedScalableVFPair::getNone();
5498 }
5499
5500 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5501 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5502 if (TC == 1) {
5503 reportVectorizationFailure("Single iteration (non) loop",
5504 "loop trip count is one, irrelevant for vectorization",
5505 "SingleIterationLoop", ORE, TheLoop);
5506 return FixedScalableVFPair::getNone();
5507 }
5508
5509 switch (ScalarEpilogueStatus) {
5510 case CM_ScalarEpilogueAllowed:
5511 return computeFeasibleMaxVF(TC, UserVF);
5512 case CM_ScalarEpilogueNotAllowedUsePredicate:
5513 LLVM_FALLTHROUGH;
5514 case CM_ScalarEpilogueNotNeededUsePredicate:
5515 LLVM_DEBUG(
5516 dbgs() << "LV: vector predicate hint/switch found.\n"
5517 << "LV: Not allowing scalar epilogue, creating predicated "
5518 << "vector loop.\n");
5519 break;
5520 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5521 // fallthrough as a special case of OptForSize
5522 case CM_ScalarEpilogueNotAllowedOptSize:
5523 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5524 LLVM_DEBUG(
5525 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5526 else
5527 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5528 << "count.\n");
5529
5530 // Bail if runtime checks are required, which are not good when optimising
5531 // for size.
5532 if (runtimeChecksRequired())
5533 return FixedScalableVFPair::getNone();
5534
5535 break;
5536 }
5537
5538 // The only loops we can vectorize without a scalar epilogue are loops with
5539 // a bottom-test and a single exiting block. We'd have to handle the fact
5540 // that not every instruction executes on the last iteration. This will
5541 // require a lane mask which varies through the vector loop body. (TODO)
5542 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5543 // If there was a tail-folding hint/switch, but we can't fold the tail by
5544 // masking, fallback to a vectorization with a scalar epilogue.
5545 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5546 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5547 "scalar epilogue instead.\n");
5548 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5549 return computeFeasibleMaxVF(TC, UserVF);
5550 }
5551 return FixedScalableVFPair::getNone();
5552 }
5553
5554 // Now try the tail folding.
5555
5556 // Invalidate interleave groups that require an epilogue if we can't mask
5557 // the interleave-group.
5558 if (!useMaskedInterleavedAccesses(TTI)) {
5559 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5560 "No decisions should have been taken at this point");
5561 // Note: There is no need to invalidate any cost modeling decisions here, as
5562 // none were taken so far.
5563 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5564 }
5565
5566 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5567 // Avoid tail folding if the trip count is known to be a multiple of any VF
5568 // we chose.
5569 // FIXME: The condition below pessimises the case for fixed-width vectors,
5570 // when scalable VFs are also candidates for vectorization.
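// Worked example of the check below (hypothetical numbers): with a known trip
// count of 1024, MaxFixedVF = 8 and UserIC = 2, MaxVFtimesIC is 16 and the
// exit count satisfies 1024 urem 16 == 0, so no tail remains and tail folding
// can be skipped for the fixed VFs.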
5571 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5572 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5573 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5574 "MaxFixedVF must be a power of 2"); 5575 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5576 : MaxFixedVF.getFixedValue(); 5577 ScalarEvolution *SE = PSE.getSE(); 5578 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5579 const SCEV *ExitCount = SE->getAddExpr( 5580 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5581 const SCEV *Rem = SE->getURemExpr( 5582 SE->applyLoopGuards(ExitCount, TheLoop), 5583 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5584 if (Rem->isZero()) { 5585 // Accept MaxFixedVF if we do not have a tail. 5586 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5587 return MaxFactors; 5588 } 5589 } 5590 5591 // For scalable vectors, don't use tail folding as this is currently not yet 5592 // supported. The code is likely to have ended up here if the tripcount is 5593 // low, in which case it makes sense not to use scalable vectors. 5594 if (MaxFactors.ScalableVF.isVector()) 5595 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5596 5597 // If we don't know the precise trip count, or if the trip count that we 5598 // found modulo the vectorization factor is not zero, try to fold the tail 5599 // by masking. 5600 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5601 if (Legal->prepareToFoldTailByMasking()) { 5602 FoldTailByMasking = true; 5603 return MaxFactors; 5604 } 5605 5606 // If there was a tail-folding hint/switch, but we can't fold the tail by 5607 // masking, fallback to a vectorization with a scalar epilogue. 5608 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5609 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5610 "scalar epilogue instead.\n"); 5611 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5612 return MaxFactors; 5613 } 5614 5615 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5616 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5617 return FixedScalableVFPair::getNone(); 5618 } 5619 5620 if (TC == 0) { 5621 reportVectorizationFailure( 5622 "Unable to calculate the loop count due to complex control flow", 5623 "unable to calculate the loop count due to complex control flow", 5624 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5625 return FixedScalableVFPair::getNone(); 5626 } 5627 5628 reportVectorizationFailure( 5629 "Cannot optimize for size and vectorize at the same time.", 5630 "cannot optimize for size and vectorize at the same time. " 5631 "Enable vectorization of this loop with '#pragma clang loop " 5632 "vectorize(enable)' when compiling with -Os/-Oz", 5633 "NoTailLoopWithOptForSize", ORE, TheLoop); 5634 return FixedScalableVFPair::getNone(); 5635 } 5636 5637 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5638 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5639 const ElementCount &MaxSafeVF) { 5640 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5641 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5642 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5643 : TargetTransformInfo::RGK_FixedWidthVector); 5644 5645 // Convenience function to return the minimum of two ElementCounts. 
5646 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5647 assert((LHS.isScalable() == RHS.isScalable()) && 5648 "Scalable flags must match"); 5649 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5650 }; 5651 5652 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5653 // Note that both WidestRegister and WidestType may not be a powers of 2. 5654 auto MaxVectorElementCount = ElementCount::get( 5655 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5656 ComputeScalableMaxVF); 5657 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5658 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5659 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5660 5661 if (!MaxVectorElementCount) { 5662 LLVM_DEBUG(dbgs() << "LV: The target has no " 5663 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5664 << " vector registers.\n"); 5665 return ElementCount::getFixed(1); 5666 } 5667 5668 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5669 if (ConstTripCount && 5670 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5671 isPowerOf2_32(ConstTripCount)) { 5672 // We need to clamp the VF to be the ConstTripCount. There is no point in 5673 // choosing a higher viable VF as done in the loop below. If 5674 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5675 // the TC is less than or equal to the known number of lanes. 5676 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5677 << ConstTripCount << "\n"); 5678 return TripCountEC; 5679 } 5680 5681 ElementCount MaxVF = MaxVectorElementCount; 5682 if (TTI.shouldMaximizeVectorBandwidth() || 5683 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5684 auto MaxVectorElementCountMaxBW = ElementCount::get( 5685 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5686 ComputeScalableMaxVF); 5687 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5688 5689 // Collect all viable vectorization factors larger than the default MaxVF 5690 // (i.e. MaxVectorElementCount). 5691 SmallVector<ElementCount, 8> VFs; 5692 for (ElementCount VS = MaxVectorElementCount * 2; 5693 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5694 VFs.push_back(VS); 5695 5696 // For each VF calculate its register usage. 5697 auto RUs = calculateRegisterUsage(VFs); 5698 5699 // Select the largest VF which doesn't require more registers than existing 5700 // ones. 
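// For illustration (assumed register widths): with a 128-bit vector register,
// a widest type of 32 bits and a smallest type of 8 bits, the default
// MaxVectorElementCount is 4 while MaxVectorElementCountMaxBW is 16, so the
// candidate VFs collected above are {8, 16}; the loop below then keeps the
// largest candidate whose estimated register usage still fits the target.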
5701 for (int i = RUs.size() - 1; i >= 0; --i) { 5702 bool Selected = true; 5703 for (auto &pair : RUs[i].MaxLocalUsers) { 5704 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5705 if (pair.second > TargetNumRegisters) 5706 Selected = false; 5707 } 5708 if (Selected) { 5709 MaxVF = VFs[i]; 5710 break; 5711 } 5712 } 5713 if (ElementCount MinVF = 5714 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5715 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5716 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5717 << ") with target's minimum: " << MinVF << '\n'); 5718 MaxVF = MinVF; 5719 } 5720 } 5721 } 5722 return MaxVF; 5723 } 5724 5725 bool LoopVectorizationCostModel::isMoreProfitable( 5726 const VectorizationFactor &A, const VectorizationFactor &B) const { 5727 InstructionCost CostA = A.Cost; 5728 InstructionCost CostB = B.Cost; 5729 5730 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5731 5732 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5733 MaxTripCount) { 5734 // If we are folding the tail and the trip count is a known (possibly small) 5735 // constant, the trip count will be rounded up to an integer number of 5736 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5737 // which we compare directly. When not folding the tail, the total cost will 5738 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5739 // approximated with the per-lane cost below instead of using the tripcount 5740 // as here. 5741 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5742 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5743 return RTCostA < RTCostB; 5744 } 5745 5746 // Improve estimate for the vector width if it is scalable. 5747 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5748 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5749 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) { 5750 if (A.Width.isScalable()) 5751 EstimatedWidthA *= VScale.getValue(); 5752 if (B.Width.isScalable()) 5753 EstimatedWidthB *= VScale.getValue(); 5754 } 5755 5756 // When set to preferred, for now assume vscale may be larger than 1 (or the 5757 // one being tuned for), so that scalable vectorization is slightly favorable 5758 // over fixed-width vectorization. 
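// E.g. (hypothetical costs): comparing A = vscale x 4 with cost 10 against
// B = fixed 4 with cost 10, and a tuning vscale of 2, EstimatedWidthA is 8,
// so 10 * 4 <= 10 * 8 holds and the scalable factor wins the tie below.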
5759 if (Hints->isScalableVectorizationPreferred()) 5760 if (A.Width.isScalable() && !B.Width.isScalable()) 5761 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5762 5763 // To avoid the need for FP division: 5764 // (CostA / A.Width) < (CostB / B.Width) 5765 // <=> (CostA * B.Width) < (CostB * A.Width) 5766 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5767 } 5768 5769 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5770 const ElementCountSet &VFCandidates) { 5771 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5772 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5773 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5774 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5775 "Expected Scalar VF to be a candidate"); 5776 5777 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5778 VectorizationFactor ChosenFactor = ScalarCost; 5779 5780 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5781 if (ForceVectorization && VFCandidates.size() > 1) { 5782 // Ignore scalar width, because the user explicitly wants vectorization. 5783 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5784 // evaluation. 5785 ChosenFactor.Cost = InstructionCost::getMax(); 5786 } 5787 5788 SmallVector<InstructionVFPair> InvalidCosts; 5789 for (const auto &i : VFCandidates) { 5790 // The cost for scalar VF=1 is already calculated, so ignore it. 5791 if (i.isScalar()) 5792 continue; 5793 5794 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5795 VectorizationFactor Candidate(i, C.first); 5796 5797 #ifndef NDEBUG 5798 unsigned AssumedMinimumVscale = 1; 5799 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) 5800 AssumedMinimumVscale = VScale.getValue(); 5801 unsigned Width = 5802 Candidate.Width.isScalable() 5803 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5804 : Candidate.Width.getFixedValue(); 5805 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5806 << " costs: " << (Candidate.Cost / Width)); 5807 if (i.isScalable()) 5808 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5809 << AssumedMinimumVscale << ")"); 5810 LLVM_DEBUG(dbgs() << ".\n"); 5811 #endif 5812 5813 if (!C.second && !ForceVectorization) { 5814 LLVM_DEBUG( 5815 dbgs() << "LV: Not considering vector loop of width " << i 5816 << " because it will not generate any vector instructions.\n"); 5817 continue; 5818 } 5819 5820 // If profitable add it to ProfitableVF list. 5821 if (isMoreProfitable(Candidate, ScalarCost)) 5822 ProfitableVFs.push_back(Candidate); 5823 5824 if (isMoreProfitable(Candidate, ChosenFactor)) 5825 ChosenFactor = Candidate; 5826 } 5827 5828 // Emit a report of VFs with invalid costs in the loop. 5829 if (!InvalidCosts.empty()) { 5830 // Group the remarks per instruction, keeping the instruction order from 5831 // InvalidCosts. 5832 std::map<Instruction *, unsigned> Numbering; 5833 unsigned I = 0; 5834 for (auto &Pair : InvalidCosts) 5835 if (!Numbering.count(Pair.first)) 5836 Numbering[Pair.first] = I++; 5837 5838 // Sort the list, first on instruction(number) then on VF. 
5839 llvm::sort(InvalidCosts, 5840 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5841 if (Numbering[A.first] != Numbering[B.first]) 5842 return Numbering[A.first] < Numbering[B.first]; 5843 ElementCountComparator ECC; 5844 return ECC(A.second, B.second); 5845 }); 5846 5847 // For a list of ordered instruction-vf pairs: 5848 // [(load, vf1), (load, vf2), (store, vf1)] 5849 // Group the instructions together to emit separate remarks for: 5850 // load (vf1, vf2) 5851 // store (vf1) 5852 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5853 auto Subset = ArrayRef<InstructionVFPair>(); 5854 do { 5855 if (Subset.empty()) 5856 Subset = Tail.take_front(1); 5857 5858 Instruction *I = Subset.front().first; 5859 5860 // If the next instruction is different, or if there are no other pairs, 5861 // emit a remark for the collated subset. e.g. 5862 // [(load, vf1), (load, vf2))] 5863 // to emit: 5864 // remark: invalid costs for 'load' at VF=(vf, vf2) 5865 if (Subset == Tail || Tail[Subset.size()].first != I) { 5866 std::string OutString; 5867 raw_string_ostream OS(OutString); 5868 assert(!Subset.empty() && "Unexpected empty range"); 5869 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5870 for (auto &Pair : Subset) 5871 OS << (Pair.second == Subset.front().second ? "" : ", ") 5872 << Pair.second; 5873 OS << "):"; 5874 if (auto *CI = dyn_cast<CallInst>(I)) 5875 OS << " call to " << CI->getCalledFunction()->getName(); 5876 else 5877 OS << " " << I->getOpcodeName(); 5878 OS.flush(); 5879 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5880 Tail = Tail.drop_front(Subset.size()); 5881 Subset = {}; 5882 } else 5883 // Grow the subset by one element 5884 Subset = Tail.take_front(Subset.size() + 1); 5885 } while (!Tail.empty()); 5886 } 5887 5888 if (!EnableCondStoresVectorization && NumPredStores) { 5889 reportVectorizationFailure("There are conditional stores.", 5890 "store that is conditionally executed prevents vectorization", 5891 "ConditionalStore", ORE, TheLoop); 5892 ChosenFactor = ScalarCost; 5893 } 5894 5895 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5896 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5897 << "LV: Vectorization seems to be not beneficial, " 5898 << "but was forced by a user.\n"); 5899 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5900 return ChosenFactor; 5901 } 5902 5903 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5904 const Loop &L, ElementCount VF) const { 5905 // Cross iteration phis such as reductions need special handling and are 5906 // currently unsupported. 5907 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 5908 return Legal->isFirstOrderRecurrence(&Phi) || 5909 Legal->isReductionVariable(&Phi); 5910 })) 5911 return false; 5912 5913 // Phis with uses outside of the loop require special handling and are 5914 // currently unsupported. 5915 for (auto &Entry : Legal->getInductionVars()) { 5916 // Look for uses of the value of the induction at the last iteration. 5917 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5918 for (User *U : PostInc->users()) 5919 if (!L.contains(cast<Instruction>(U))) 5920 return false; 5921 // Look for uses of penultimate value of the induction. 5922 for (User *U : Entry.first->users()) 5923 if (!L.contains(cast<Instruction>(U))) 5924 return false; 5925 } 5926 5927 // Induction variables that are widened require special handling that is 5928 // currently not supported. 
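// In other words, every induction must either remain scalar after
// vectorization or be profitable to scalarize; an induction that would have
// to be widened into a vector value disqualifies the loop in the check below.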
5929 if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5930 return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5931 this->isProfitableToScalarize(Entry.first, VF));
5932 }))
5933 return false;
5934
5935 // Epilogue vectorization code has not been audited to ensure it handles
5936 // non-latch exits properly. It may be fine, but it needs to be audited and
5937 // tested.
5938 if (L.getExitingBlock() != L.getLoopLatch())
5939 return false;
5940
5941 return true;
5942 }
5943
5944 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5945 const ElementCount VF) const {
5946 // FIXME: We need a much better cost-model to take different parameters such
5947 // as register pressure, code size increase and cost of extra branches into
5948 // account. For now we apply a very crude heuristic and only consider loops
5949 // with vectorization factors larger than a certain value.
5950 // We also consider epilogue vectorization unprofitable for targets that don't
5951 // consider interleaving beneficial (e.g. MVE).
5952 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5953 return false;
5954 if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5955 return true;
5956 return false;
5957 }
5958
5959 VectorizationFactor
5960 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5961 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5962 VectorizationFactor Result = VectorizationFactor::Disabled();
5963 if (!EnableEpilogueVectorization) {
5964 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5965 return Result;
5966 }
5967
5968 if (!isScalarEpilogueAllowed()) {
5969 LLVM_DEBUG(
5970 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5971 "allowed.\n";);
5972 return Result;
5973 }
5974
5975 // Not really a cost consideration, but check for unsupported cases here to
5976 // simplify the logic.
5977 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5978 LLVM_DEBUG(
5979 dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5980 "not a supported candidate.\n";);
5981 return Result;
5982 }
5983
5984 if (EpilogueVectorizationForceVF > 1) {
5985 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5986 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5987 if (LVP.hasPlanWithVF(ForcedEC))
5988 return {ForcedEC, 0};
5989 else {
5990 LLVM_DEBUG(
5991 dbgs()
5992 << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5993 return Result;
5994 }
5995 }
5996
5997 if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5998 TheLoop->getHeader()->getParent()->hasMinSize()) {
5999 LLVM_DEBUG(
6000 dbgs()
6001 << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6002 return Result;
6003 }
6004
6005 auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
6006 if (MainLoopVF.isScalable())
6007 LLVM_DEBUG(
6008 dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
6009 "yet supported. 
Converting to fixed-width (VF=" 6010 << FixedMainLoopVF << ") instead\n"); 6011 6012 if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) { 6013 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 6014 "this loop\n"); 6015 return Result; 6016 } 6017 6018 for (auto &NextVF : ProfitableVFs) 6019 if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) && 6020 (Result.Width.getFixedValue() == 1 || 6021 isMoreProfitable(NextVF, Result)) && 6022 LVP.hasPlanWithVF(NextVF.Width)) 6023 Result = NextVF; 6024 6025 if (Result != VectorizationFactor::Disabled()) 6026 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6027 << Result.Width.getFixedValue() << "\n";); 6028 return Result; 6029 } 6030 6031 std::pair<unsigned, unsigned> 6032 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6033 unsigned MinWidth = -1U; 6034 unsigned MaxWidth = 8; 6035 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6036 for (Type *T : ElementTypesInLoop) { 6037 MinWidth = std::min<unsigned>( 6038 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6039 MaxWidth = std::max<unsigned>( 6040 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6041 } 6042 return {MinWidth, MaxWidth}; 6043 } 6044 6045 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6046 ElementTypesInLoop.clear(); 6047 // For each block. 6048 for (BasicBlock *BB : TheLoop->blocks()) { 6049 // For each instruction in the loop. 6050 for (Instruction &I : BB->instructionsWithoutDebug()) { 6051 Type *T = I.getType(); 6052 6053 // Skip ignored values. 6054 if (ValuesToIgnore.count(&I)) 6055 continue; 6056 6057 // Only examine Loads, Stores and PHINodes. 6058 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6059 continue; 6060 6061 // Examine PHI nodes that are reduction variables. Update the type to 6062 // account for the recurrence type. 6063 if (auto *PN = dyn_cast<PHINode>(&I)) { 6064 if (!Legal->isReductionVariable(PN)) 6065 continue; 6066 const RecurrenceDescriptor &RdxDesc = 6067 Legal->getReductionVars().find(PN)->second; 6068 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6069 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6070 RdxDesc.getRecurrenceType(), 6071 TargetTransformInfo::ReductionFlags())) 6072 continue; 6073 T = RdxDesc.getRecurrenceType(); 6074 } 6075 6076 // Examine the stored values. 6077 if (auto *ST = dyn_cast<StoreInst>(&I)) 6078 T = ST->getValueOperand()->getType(); 6079 6080 // Ignore loaded pointer types and stored pointer types that are not 6081 // vectorizable. 6082 // 6083 // FIXME: The check here attempts to predict whether a load or store will 6084 // be vectorized. We only know this for certain after a VF has 6085 // been selected. Here, we assume that if an access can be 6086 // vectorized, it will be. We should also look at extending this 6087 // optimization to non-pointer types. 6088 // 6089 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6090 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6091 continue; 6092 6093 ElementTypesInLoop.insert(T); 6094 } 6095 } 6096 } 6097 6098 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6099 unsigned LoopCost) { 6100 // -- The interleave heuristics -- 6101 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6102 // There are many micro-architectural considerations that we can't predict 6103 // at this level. 
For example, frontend pressure (on decode or fetch) due to 6104 // code size, or the number and capabilities of the execution ports. 6105 // 6106 // We use the following heuristics to select the interleave count: 6107 // 1. If the code has reductions, then we interleave to break the cross 6108 // iteration dependency. 6109 // 2. If the loop is really small, then we interleave to reduce the loop 6110 // overhead. 6111 // 3. We don't interleave if we think that we will spill registers to memory 6112 // due to the increased register pressure. 6113 6114 if (!isScalarEpilogueAllowed()) 6115 return 1; 6116 6117 // We used the distance for the interleave count. 6118 if (Legal->getMaxSafeDepDistBytes() != -1U) 6119 return 1; 6120 6121 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6122 const bool HasReductions = !Legal->getReductionVars().empty(); 6123 // Do not interleave loops with a relatively small known or estimated trip 6124 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6125 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6126 // because with the above conditions interleaving can expose ILP and break 6127 // cross iteration dependences for reductions. 6128 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6129 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6130 return 1; 6131 6132 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6133 // We divide by these constants so assume that we have at least one 6134 // instruction that uses at least one register. 6135 for (auto& pair : R.MaxLocalUsers) { 6136 pair.second = std::max(pair.second, 1U); 6137 } 6138 6139 // We calculate the interleave count using the following formula. 6140 // Subtract the number of loop invariants from the number of available 6141 // registers. These registers are used by all of the interleaved instances. 6142 // Next, divide the remaining registers by the number of registers that is 6143 // required by the loop, in order to estimate how many parallel instances 6144 // fit without causing spills. All of this is rounded down if necessary to be 6145 // a power of two. We want power of two interleave count to simplify any 6146 // addressing operations or alignment considerations. 6147 // We also want power of two interleave counts to ensure that the induction 6148 // variable of the vector loop wraps to zero, when tail is folded by masking; 6149 // this currently happens when OptForSize, in which case IC is set to 1 above. 6150 unsigned IC = UINT_MAX; 6151 6152 for (auto& pair : R.MaxLocalUsers) { 6153 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6154 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6155 << " registers of " 6156 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6157 if (VF.isScalar()) { 6158 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6159 TargetNumRegisters = ForceTargetNumScalarRegs; 6160 } else { 6161 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6162 TargetNumRegisters = ForceTargetNumVectorRegs; 6163 } 6164 unsigned MaxLocalUsers = pair.second; 6165 unsigned LoopInvariantRegs = 0; 6166 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6167 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6168 6169 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6170 // Don't count the induction variable as interleaved. 
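// Worked example of the adjusted formula below (hypothetical numbers): with
// TargetNumRegisters = 32, LoopInvariantRegs = 2 and MaxLocalUsers = 6, the
// induction-variable-adjusted estimate is
// PowerOf2Floor((32 - 2 - 1) / (6 - 1)) == PowerOf2Floor(5) == 4.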
6171 if (EnableIndVarRegisterHeur) { 6172 TmpIC = 6173 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6174 std::max(1U, (MaxLocalUsers - 1))); 6175 } 6176 6177 IC = std::min(IC, TmpIC); 6178 } 6179 6180 // Clamp the interleave ranges to reasonable counts. 6181 unsigned MaxInterleaveCount = 6182 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6183 6184 // Check if the user has overridden the max. 6185 if (VF.isScalar()) { 6186 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6187 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6188 } else { 6189 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6190 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6191 } 6192 6193 // If trip count is known or estimated compile time constant, limit the 6194 // interleave count to be less than the trip count divided by VF, provided it 6195 // is at least 1. 6196 // 6197 // For scalable vectors we can't know if interleaving is beneficial. It may 6198 // not be beneficial for small loops if none of the lanes in the second vector 6199 // iterations is enabled. However, for larger loops, there is likely to be a 6200 // similar benefit as for fixed-width vectors. For now, we choose to leave 6201 // the InterleaveCount as if vscale is '1', although if some information about 6202 // the vector is known (e.g. min vector size), we can make a better decision. 6203 if (BestKnownTC) { 6204 MaxInterleaveCount = 6205 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6206 // Make sure MaxInterleaveCount is greater than 0. 6207 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6208 } 6209 6210 assert(MaxInterleaveCount > 0 && 6211 "Maximum interleave count must be greater than 0"); 6212 6213 // Clamp the calculated IC to be between the 1 and the max interleave count 6214 // that the target and trip count allows. 6215 if (IC > MaxInterleaveCount) 6216 IC = MaxInterleaveCount; 6217 else 6218 // Make sure IC is greater than 0. 6219 IC = std::max(1u, IC); 6220 6221 assert(IC > 0 && "Interleave count must be greater than 0."); 6222 6223 // If we did not calculate the cost for VF (because the user selected the VF) 6224 // then we calculate the cost of VF here. 6225 if (LoopCost == 0) { 6226 InstructionCost C = expectedCost(VF).first; 6227 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6228 LoopCost = *C.getValue(); 6229 } 6230 6231 assert(LoopCost && "Non-zero loop cost expected"); 6232 6233 // Interleave if we vectorized this loop and there is a reduction that could 6234 // benefit from interleaving. 6235 if (VF.isVector() && HasReductions) { 6236 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6237 return IC; 6238 } 6239 6240 // Note that if we've already vectorized the loop we will have done the 6241 // runtime check and so interleaving won't require further checks. 6242 bool InterleavingRequiresRuntimePointerCheck = 6243 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6244 6245 // We want to interleave small loops in order to reduce the loop overhead and 6246 // potentially expose ILP opportunities. 
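// For illustration, assuming the default SmallLoopCost threshold of 20: a
// loop whose estimated cost is 4 is treated as "small" (when no runtime
// pointer check is needed) and gets
// SmallIC = min(IC, PowerOf2Floor(20 / 4)) = min(IC, 4) in the block below.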
6247 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6248 << "LV: IC is " << IC << '\n' 6249 << "LV: VF is " << VF << '\n'); 6250 const bool AggressivelyInterleaveReductions = 6251 TTI.enableAggressiveInterleaving(HasReductions); 6252 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6253 // We assume that the cost overhead is 1 and we use the cost model 6254 // to estimate the cost of the loop and interleave until the cost of the 6255 // loop overhead is about 5% of the cost of the loop. 6256 unsigned SmallIC = 6257 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6258 6259 // Interleave until store/load ports (estimated by max interleave count) are 6260 // saturated. 6261 unsigned NumStores = Legal->getNumStores(); 6262 unsigned NumLoads = Legal->getNumLoads(); 6263 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6264 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6265 6266 // There is little point in interleaving for reductions containing selects 6267 // and compares when VF=1 since it may just create more overhead than it's 6268 // worth for loops with small trip counts. This is because we still have to 6269 // do the final reduction after the loop. 6270 bool HasSelectCmpReductions = 6271 HasReductions && 6272 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6273 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6274 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 6275 RdxDesc.getRecurrenceKind()); 6276 }); 6277 if (HasSelectCmpReductions) { 6278 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 6279 return 1; 6280 } 6281 6282 // If we have a scalar reduction (vector reductions are already dealt with 6283 // by this point), we can increase the critical path length if the loop 6284 // we're interleaving is inside another loop. For tree-wise reductions 6285 // set the limit to 2, and for ordered reductions it's best to disable 6286 // interleaving entirely. 6287 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6288 bool HasOrderedReductions = 6289 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6290 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6291 return RdxDesc.isOrdered(); 6292 }); 6293 if (HasOrderedReductions) { 6294 LLVM_DEBUG( 6295 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6296 return 1; 6297 } 6298 6299 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6300 SmallIC = std::min(SmallIC, F); 6301 StoresIC = std::min(StoresIC, F); 6302 LoadsIC = std::min(LoadsIC, F); 6303 } 6304 6305 if (EnableLoadStoreRuntimeInterleave && 6306 std::max(StoresIC, LoadsIC) > SmallIC) { 6307 LLVM_DEBUG( 6308 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6309 return std::max(StoresIC, LoadsIC); 6310 } 6311 6312 // If there are scalar reductions and TTI has enabled aggressive 6313 // interleaving for reductions, we will interleave to expose ILP. 6314 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6315 AggressivelyInterleaveReductions) { 6316 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6317 // Interleave no less than SmallIC but not as aggressive as the normal IC 6318 // to satisfy the rare situation when resources are too limited. 
6319 return std::max(IC / 2, SmallIC);
6320 } else {
6321 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6322 return SmallIC;
6323 }
6324 }
6325
6326 // Interleave if this is a large loop (small loops are already dealt with by
6327 // this point) that could benefit from interleaving.
6328 if (AggressivelyInterleaveReductions) {
6329 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6330 return IC;
6331 }
6332
6333 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6334 return 1;
6335 }
6336
6337 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6338 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6339 // This function calculates the register usage by measuring the highest number
6340 // of values that are alive at a single location. Obviously, this is a very
6341 // rough estimation. We scan the loop in topological order and
6342 // assign a number to each instruction. We use RPO to ensure that defs are
6343 // met before their users. We assume that each instruction that has in-loop
6344 // users starts an interval. We record every time that an in-loop value is
6345 // used, so we have a list of the first and last occurrences of each
6346 // instruction. Next, we transpose this data structure into a multi-map that
6347 // holds the list of intervals that *end* at a specific location. This
6348 // multi-map allows us to perform a linear search. We scan the instructions linearly
6349 // and record each time that a new interval starts, by placing it in a set.
6350 // If we find this value in the multi-map then we remove it from the set.
6351 // The max register usage is the maximum size of the set.
6352 // We also search for instructions that are defined outside the loop, but are
6353 // used inside the loop. We need this number separately from the max-interval
6354 // usage number because when we unroll, loop-invariant values do not take
6355 // more registers.
6356 LoopBlocksDFS DFS(TheLoop);
6357 DFS.perform(LI);
6358
6359 RegisterUsage RU;
6360
6361 // Each 'key' in the map opens a new interval. The values
6362 // of the map are the index of the 'last seen' usage of the
6363 // instruction that is the key.
6364 using IntervalMap = DenseMap<Instruction *, unsigned>;
6365
6366 // Maps instruction to its index.
6367 SmallVector<Instruction *, 64> IdxToInstr;
6368 // Marks the end of each interval.
6369 IntervalMap EndPoint;
6370 // Saves the list of instruction indices that are used in the loop.
6371 SmallPtrSet<Instruction *, 8> Ends;
6372 // Saves the list of values that are used in the loop but are
6373 // defined outside the loop, such as arguments and constants.
6374 SmallPtrSet<Value *, 8> LoopInvariants;
6375
6376 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6377 for (Instruction &I : BB->instructionsWithoutDebug()) {
6378 IdxToInstr.push_back(&I);
6379
6380 // Save the end location of each USE.
6381 for (Value *U : I.operands()) {
6382 auto *Instr = dyn_cast<Instruction>(U);
6383
6384 // Ignore non-instruction values such as arguments, constants, etc.
6385 if (!Instr)
6386 continue;
6387
6388 // If this instruction is outside the loop then record it and continue.
6389 if (!TheLoop->contains(Instr)) {
6390 LoopInvariants.insert(Instr);
6391 continue;
6392 }
6393
6394 // Overwrite previous end points.
6395 EndPoint[Instr] = IdxToInstr.size();
6396 Ends.insert(Instr);
6397 }
6398 }
6399 }
6400
6401 // Saves the list of intervals that end with the index in 'key'.
6402 using InstrList = SmallVector<Instruction *, 2>; 6403 DenseMap<unsigned, InstrList> TransposeEnds; 6404 6405 // Transpose the EndPoints to a list of values that end at each index. 6406 for (auto &Interval : EndPoint) 6407 TransposeEnds[Interval.second].push_back(Interval.first); 6408 6409 SmallPtrSet<Instruction *, 8> OpenIntervals; 6410 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6411 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6412 6413 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6414 6415 // A lambda that gets the register usage for the given type and VF. 6416 const auto &TTICapture = TTI; 6417 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6418 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6419 return 0; 6420 InstructionCost::CostType RegUsage = 6421 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6422 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6423 "Nonsensical values for register usage."); 6424 return RegUsage; 6425 }; 6426 6427 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6428 Instruction *I = IdxToInstr[i]; 6429 6430 // Remove all of the instructions that end at this location. 6431 InstrList &List = TransposeEnds[i]; 6432 for (Instruction *ToRemove : List) 6433 OpenIntervals.erase(ToRemove); 6434 6435 // Ignore instructions that are never used within the loop. 6436 if (!Ends.count(I)) 6437 continue; 6438 6439 // Skip ignored values. 6440 if (ValuesToIgnore.count(I)) 6441 continue; 6442 6443 // For each VF find the maximum usage of registers. 6444 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6445 // Count the number of live intervals. 6446 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6447 6448 if (VFs[j].isScalar()) { 6449 for (auto Inst : OpenIntervals) { 6450 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6451 if (RegUsage.find(ClassID) == RegUsage.end()) 6452 RegUsage[ClassID] = 1; 6453 else 6454 RegUsage[ClassID] += 1; 6455 } 6456 } else { 6457 collectUniformsAndScalars(VFs[j]); 6458 for (auto Inst : OpenIntervals) { 6459 // Skip ignored values for VF > 1. 6460 if (VecValuesToIgnore.count(Inst)) 6461 continue; 6462 if (isScalarAfterVectorization(Inst, VFs[j])) { 6463 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6464 if (RegUsage.find(ClassID) == RegUsage.end()) 6465 RegUsage[ClassID] = 1; 6466 else 6467 RegUsage[ClassID] += 1; 6468 } else { 6469 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6470 if (RegUsage.find(ClassID) == RegUsage.end()) 6471 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6472 else 6473 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6474 } 6475 } 6476 } 6477 6478 for (auto& pair : RegUsage) { 6479 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6480 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6481 else 6482 MaxUsages[j][pair.first] = pair.second; 6483 } 6484 } 6485 6486 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6487 << OpenIntervals.size() << '\n'); 6488 6489 // Add the current instruction to the list of open intervals. 6490 OpenIntervals.insert(I); 6491 } 6492 6493 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6494 SmallMapVector<unsigned, unsigned, 4> Invariant; 6495 6496 for (auto Inst : LoopInvariants) { 6497 unsigned Usage = 6498 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6499 unsigned ClassID = 6500 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6501 if (Invariant.find(ClassID) == Invariant.end()) 6502 Invariant[ClassID] = Usage; 6503 else 6504 Invariant[ClassID] += Usage; 6505 } 6506 6507 LLVM_DEBUG({ 6508 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6509 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6510 << " item\n"; 6511 for (const auto &pair : MaxUsages[i]) { 6512 dbgs() << "LV(REG): RegisterClass: " 6513 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6514 << " registers\n"; 6515 } 6516 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6517 << " item\n"; 6518 for (const auto &pair : Invariant) { 6519 dbgs() << "LV(REG): RegisterClass: " 6520 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6521 << " registers\n"; 6522 } 6523 }); 6524 6525 RU.LoopInvariantRegs = Invariant; 6526 RU.MaxLocalUsers = MaxUsages[i]; 6527 RUs[i] = RU; 6528 } 6529 6530 return RUs; 6531 } 6532 6533 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6534 // TODO: Cost model for emulated masked load/store is completely 6535 // broken. This hack guides the cost model to use an artificially 6536 // high enough value to practically disable vectorization with such 6537 // operations, except where previously deployed legality hack allowed 6538 // using very low cost values. This is to avoid regressions coming simply 6539 // from moving "masked load/store" check from legality to cost model. 6540 // Masked Load/Gather emulation was previously never allowed. 6541 // Limited number of Masked Store/Scatter emulation was allowed. 6542 assert(isPredicatedInst(I) && 6543 "Expecting a scalar emulated instruction"); 6544 return isa<LoadInst>(I) || 6545 (isa<StoreInst>(I) && 6546 NumPredStores > NumberOfStoresToPredicate); 6547 } 6548 6549 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6550 // If we aren't vectorizing the loop, or if we've already collected the 6551 // instructions to scalarize, there's nothing to do. Collection may already 6552 // have occurred if we have a user-selected VF and are now computing the 6553 // expected cost for interleaving. 6554 if (VF.isScalar() || VF.isZero() || 6555 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6556 return; 6557 6558 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6559 // not profitable to scalarize any instructions, the presence of VF in the 6560 // map will indicate that we've analyzed it already. 6561 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6562 6563 // Find all the instructions that are scalar with predication in the loop and 6564 // determine if it would be better to not if-convert the blocks they are in. 6565 // If so, we also record the instructions to scalarize. 6566 for (BasicBlock *BB : TheLoop->blocks()) { 6567 if (!blockNeedsPredicationForAnyReason(BB)) 6568 continue; 6569 for (Instruction &I : *BB) 6570 if (isScalarWithPredication(&I)) { 6571 ScalarCostsTy ScalarCosts; 6572 // Do not apply discount if scalable, because that would lead to 6573 // invalid scalarization costs. 6574 // Do not apply discount logic if hacked cost is needed 6575 // for emulated masked memrefs. 6576 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && 6577 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6578 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6579 // Remember that BB will remain after vectorization. 
6580 PredicatedBBsAfterVectorization.insert(BB); 6581 } 6582 } 6583 } 6584 6585 int LoopVectorizationCostModel::computePredInstDiscount( 6586 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6587 assert(!isUniformAfterVectorization(PredInst, VF) && 6588 "Instruction marked uniform-after-vectorization will be predicated"); 6589 6590 // Initialize the discount to zero, meaning that the scalar version and the 6591 // vector version cost the same. 6592 InstructionCost Discount = 0; 6593 6594 // Holds instructions to analyze. The instructions we visit are mapped in 6595 // ScalarCosts. Those instructions are the ones that would be scalarized if 6596 // we find that the scalar version costs less. 6597 SmallVector<Instruction *, 8> Worklist; 6598 6599 // Returns true if the given instruction can be scalarized. 6600 auto canBeScalarized = [&](Instruction *I) -> bool { 6601 // We only attempt to scalarize instructions forming a single-use chain 6602 // from the original predicated block that would otherwise be vectorized. 6603 // Although not strictly necessary, we give up on instructions we know will 6604 // already be scalar to avoid traversing chains that are unlikely to be 6605 // beneficial. 6606 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6607 isScalarAfterVectorization(I, VF)) 6608 return false; 6609 6610 // If the instruction is scalar with predication, it will be analyzed 6611 // separately. We ignore it within the context of PredInst. 6612 if (isScalarWithPredication(I)) 6613 return false; 6614 6615 // If any of the instruction's operands are uniform after vectorization, 6616 // the instruction cannot be scalarized. This prevents, for example, a 6617 // masked load from being scalarized. 6618 // 6619 // We assume we will only emit a value for lane zero of an instruction 6620 // marked uniform after vectorization, rather than VF identical values. 6621 // Thus, if we scalarize an instruction that uses a uniform, we would 6622 // create uses of values corresponding to the lanes we aren't emitting code 6623 // for. This behavior can be changed by allowing getScalarValue to clone 6624 // the lane zero values for uniforms rather than asserting. 6625 for (Use &U : I->operands()) 6626 if (auto *J = dyn_cast<Instruction>(U.get())) 6627 if (isUniformAfterVectorization(J, VF)) 6628 return false; 6629 6630 // Otherwise, we can scalarize the instruction. 6631 return true; 6632 }; 6633 6634 // Compute the expected cost discount from scalarizing the entire expression 6635 // feeding the predicated instruction. We currently only consider expressions 6636 // that are single-use instruction chains. 6637 Worklist.push_back(PredInst); 6638 while (!Worklist.empty()) { 6639 Instruction *I = Worklist.pop_back_val(); 6640 6641 // If we've already analyzed the instruction, there's nothing to do. 6642 if (ScalarCosts.find(I) != ScalarCosts.end()) 6643 continue; 6644 6645 // Compute the cost of the vector instruction. Note that this cost already 6646 // includes the scalarization overhead of the predicated instruction. 6647 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6648 6649 // Compute the cost of the scalarized instruction. This cost is the cost of 6650 // the instruction as if it wasn't if-converted and instead remained in the 6651 // predicated block. We will scale this cost by block probability after 6652 // computing the scalarization overhead. 
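// (The division by getReciprocalPredBlockProb() further down models the
// predicated block as executing on only a fraction of the iterations,
// currently assumed to be every other one, so the accumulated scalar cost is
// effectively halved before being compared against the vector cost.)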
6653 InstructionCost ScalarCost = 6654 VF.getFixedValue() * 6655 getInstructionCost(I, ElementCount::getFixed(1)).first; 6656 6657 // Compute the scalarization overhead of needed insertelement instructions 6658 // and phi nodes. 6659 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6660 ScalarCost += TTI.getScalarizationOverhead( 6661 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6662 APInt::getAllOnes(VF.getFixedValue()), true, false); 6663 ScalarCost += 6664 VF.getFixedValue() * 6665 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6666 } 6667 6668 // Compute the scalarization overhead of needed extractelement 6669 // instructions. For each of the instruction's operands, if the operand can 6670 // be scalarized, add it to the worklist; otherwise, account for the 6671 // overhead. 6672 for (Use &U : I->operands()) 6673 if (auto *J = dyn_cast<Instruction>(U.get())) { 6674 assert(VectorType::isValidElementType(J->getType()) && 6675 "Instruction has non-scalar type"); 6676 if (canBeScalarized(J)) 6677 Worklist.push_back(J); 6678 else if (needsExtract(J, VF)) { 6679 ScalarCost += TTI.getScalarizationOverhead( 6680 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6681 APInt::getAllOnes(VF.getFixedValue()), false, true); 6682 } 6683 } 6684 6685 // Scale the total scalar cost by block probability. 6686 ScalarCost /= getReciprocalPredBlockProb(); 6687 6688 // Compute the discount. A non-negative discount means the vector version 6689 // of the instruction costs more, and scalarizing would be beneficial. 6690 Discount += VectorCost - ScalarCost; 6691 ScalarCosts[I] = ScalarCost; 6692 } 6693 6694 return *Discount.getValue(); 6695 } 6696 6697 LoopVectorizationCostModel::VectorizationCostTy 6698 LoopVectorizationCostModel::expectedCost( 6699 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6700 VectorizationCostTy Cost; 6701 6702 // For each block. 6703 for (BasicBlock *BB : TheLoop->blocks()) { 6704 VectorizationCostTy BlockCost; 6705 6706 // For each instruction in the old loop. 6707 for (Instruction &I : BB->instructionsWithoutDebug()) { 6708 // Skip ignored values. 6709 if (ValuesToIgnore.count(&I) || 6710 (VF.isVector() && VecValuesToIgnore.count(&I))) 6711 continue; 6712 6713 VectorizationCostTy C = getInstructionCost(&I, VF); 6714 6715 // Check if we should override the cost. 6716 if (C.first.isValid() && 6717 ForceTargetInstructionCost.getNumOccurrences() > 0) 6718 C.first = InstructionCost(ForceTargetInstructionCost); 6719 6720 // Keep a list of instructions with invalid costs. 6721 if (Invalid && !C.first.isValid()) 6722 Invalid->emplace_back(&I, VF); 6723 6724 BlockCost.first += C.first; 6725 BlockCost.second |= C.second; 6726 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6727 << " for VF " << VF << " For instruction: " << I 6728 << '\n'); 6729 } 6730 6731 // If we are vectorizing a predicated block, it will have been 6732 // if-converted. This means that the block's instructions (aside from 6733 // stores and instructions that may divide by zero) will now be 6734 // unconditionally executed. For the scalar case, we may not always execute 6735 // the predicated block, if it is an if-else block. Thus, scale the block's 6736 // cost by the probability of executing it. blockNeedsPredication from 6737 // Legal is used so as to not include all blocks in tail folded loops. 
6738 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6739 BlockCost.first /= getReciprocalPredBlockProb(); 6740 6741 Cost.first += BlockCost.first; 6742 Cost.second |= BlockCost.second; 6743 } 6744 6745 return Cost; 6746 } 6747 6748 /// Gets Address Access SCEV after verifying that the access pattern 6749 /// is loop invariant except the induction variable dependence. 6750 /// 6751 /// This SCEV can be sent to the Target in order to estimate the address 6752 /// calculation cost. 6753 static const SCEV *getAddressAccessSCEV( 6754 Value *Ptr, 6755 LoopVectorizationLegality *Legal, 6756 PredicatedScalarEvolution &PSE, 6757 const Loop *TheLoop) { 6758 6759 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6760 if (!Gep) 6761 return nullptr; 6762 6763 // We are looking for a gep with all loop invariant indices except for one 6764 // which should be an induction variable. 6765 auto SE = PSE.getSE(); 6766 unsigned NumOperands = Gep->getNumOperands(); 6767 for (unsigned i = 1; i < NumOperands; ++i) { 6768 Value *Opd = Gep->getOperand(i); 6769 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6770 !Legal->isInductionVariable(Opd)) 6771 return nullptr; 6772 } 6773 6774 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6775 return PSE.getSCEV(Ptr); 6776 } 6777 6778 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6779 return Legal->hasStride(I->getOperand(0)) || 6780 Legal->hasStride(I->getOperand(1)); 6781 } 6782 6783 InstructionCost 6784 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6785 ElementCount VF) { 6786 assert(VF.isVector() && 6787 "Scalarization cost of instruction implies vectorization."); 6788 if (VF.isScalable()) 6789 return InstructionCost::getInvalid(); 6790 6791 Type *ValTy = getLoadStoreType(I); 6792 auto SE = PSE.getSE(); 6793 6794 unsigned AS = getLoadStoreAddressSpace(I); 6795 Value *Ptr = getLoadStorePointerOperand(I); 6796 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6797 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6798 // that it is being called from this specific place. 6799 6800 // Figure out whether the access is strided and get the stride value 6801 // if it's known in compile time 6802 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6803 6804 // Get the cost of the scalar memory instruction and address computation. 6805 InstructionCost Cost = 6806 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6807 6808 // Don't pass *I here, since it is scalar but will actually be part of a 6809 // vectorized loop where the user of it is a vectorized instruction. 6810 const Align Alignment = getLoadStoreAlignment(I); 6811 Cost += VF.getKnownMinValue() * 6812 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6813 AS, TTI::TCK_RecipThroughput); 6814 6815 // Get the overhead of the extractelement and insertelement instructions 6816 // we might create due to scalarization. 6817 Cost += getScalarizationOverhead(I, VF); 6818 6819 // If we have a predicated load/store, it will need extra i1 extracts and 6820 // conditional branches, but may not be executed for each vector lane. Scale 6821 // the cost by the probability of executing the predicated block. 
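// Sketch of the resulting cost (illustrative only): at VF=4 a scalarized,
// predicated load costs roughly
//   4 * (address computation + scalar load) + insert/extract overhead,
// divided by the reciprocal block probability, plus the i1 extracts and
// branches added below (or the artificially high constant from
// useEmulatedMaskMemRefHack()).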
6822 if (isPredicatedInst(I)) { 6823 Cost /= getReciprocalPredBlockProb(); 6824 6825 // Add the cost of an i1 extract and a branch 6826 auto *Vec_i1Ty = 6827 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6828 Cost += TTI.getScalarizationOverhead( 6829 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6830 /*Insert=*/false, /*Extract=*/true); 6831 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6832 6833 if (useEmulatedMaskMemRefHack(I)) 6834 // Artificially setting to a high enough value to practically disable 6835 // vectorization with such operations. 6836 Cost = 3000000; 6837 } 6838 6839 return Cost; 6840 } 6841 6842 InstructionCost 6843 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6844 ElementCount VF) { 6845 Type *ValTy = getLoadStoreType(I); 6846 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6847 Value *Ptr = getLoadStorePointerOperand(I); 6848 unsigned AS = getLoadStoreAddressSpace(I); 6849 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6850 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6851 6852 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6853 "Stride should be 1 or -1 for consecutive memory access"); 6854 const Align Alignment = getLoadStoreAlignment(I); 6855 InstructionCost Cost = 0; 6856 if (Legal->isMaskRequired(I)) 6857 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6858 CostKind); 6859 else 6860 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6861 CostKind, I); 6862 6863 bool Reverse = ConsecutiveStride < 0; 6864 if (Reverse) 6865 Cost += 6866 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6867 return Cost; 6868 } 6869 6870 InstructionCost 6871 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6872 ElementCount VF) { 6873 assert(Legal->isUniformMemOp(*I)); 6874 6875 Type *ValTy = getLoadStoreType(I); 6876 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6877 const Align Alignment = getLoadStoreAlignment(I); 6878 unsigned AS = getLoadStoreAddressSpace(I); 6879 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6880 if (isa<LoadInst>(I)) { 6881 return TTI.getAddressComputationCost(ValTy) + 6882 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6883 CostKind) + 6884 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6885 } 6886 StoreInst *SI = cast<StoreInst>(I); 6887 6888 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6889 return TTI.getAddressComputationCost(ValTy) + 6890 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6891 CostKind) + 6892 (isLoopInvariantStoreValue 6893 ? 
0
6894 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6895 VF.getKnownMinValue() - 1));
6896 }
6897
6898 InstructionCost
6899 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6900 ElementCount VF) {
6901 Type *ValTy = getLoadStoreType(I);
6902 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6903 const Align Alignment = getLoadStoreAlignment(I);
6904 const Value *Ptr = getLoadStorePointerOperand(I);
6905
6906 return TTI.getAddressComputationCost(VectorTy) +
6907 TTI.getGatherScatterOpCost(
6908 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6909 TargetTransformInfo::TCK_RecipThroughput, I);
6910 }
6911
6912 InstructionCost
6913 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6914 ElementCount VF) {
6915 // TODO: Once we have support for interleaving with scalable vectors
6916 // we can calculate the cost properly here.
6917 if (VF.isScalable())
6918 return InstructionCost::getInvalid();
6919
6920 Type *ValTy = getLoadStoreType(I);
6921 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6922 unsigned AS = getLoadStoreAddressSpace(I);
6923
6924 auto Group = getInterleavedAccessGroup(I);
6925 assert(Group && "Failed to get an interleaved access group.");
6926
6927 unsigned InterleaveFactor = Group->getFactor();
6928 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6929
6930 // Holds the indices of existing members in the interleaved group.
6931 SmallVector<unsigned, 4> Indices;
6932 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6933 if (Group->getMember(IF))
6934 Indices.push_back(IF);
6935
6936 // Calculate the cost of the whole interleaved group.
6937 bool UseMaskForGaps =
6938 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6939 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6940 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6941 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6942 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6943
6944 if (Group->isReverse()) {
6945 // TODO: Add support for reversed masked interleaved access.
6946 assert(!Legal->isMaskRequired(I) &&
6947 "Reverse masked interleaved access not supported.");
6948 Cost +=
6949 Group->getNumMembers() *
6950 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6951 }
6952 return Cost;
6953 }
6954
6955 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6956 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6957 using namespace llvm::PatternMatch;
6958 // Early exit for no in-loop reductions.
6959 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6960 return None;
6961 auto *VectorTy = cast<VectorType>(Ty);
6962
6963 // We are looking for one of the following patterns, finding the minimal acceptable cost:
6964 // reduce(mul(ext(A), ext(B))) or
6965 // reduce(mul(A, B)) or
6966 // reduce(ext(A)) or
6967 // reduce(A).
6968 // The basic idea is that we walk down the tree, finding the root reduction
6969 // instruction in InLoopReductionImmediateChains. From there we find the
6970 // pattern of mul/ext and test the cost of the entire pattern vs the cost of
6971 // its components. If the reduction cost is lower, then we return it for the
6972 // reduction instruction and 0 for the other instructions in the pattern. If
6973 // it is not, we return an invalid cost specifying that the original cost
6974 // method should be used.
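// A minimal IR sketch of the widest pattern above (names and types are
// illustrative only):
//   %a.ext = sext i8 %a to i32
//   %b.ext = sext i8 %b to i32
//   %mul = mul nsw i32 %a.ext, %b.ext
//   %add = add i32 %sum, %mul        ; in-loop reduction root
// Targets with extending multiply-accumulate reduction support can often
// implement such a chain as a single operation, which is why the combined
// cost queried below can beat the sum of the component costs.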
6975 Instruction *RetI = I;
6976 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6977 if (!RetI->hasOneUser())
6978 return None;
6979 RetI = RetI->user_back();
6980 }
6981 if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6982 RetI->user_back()->getOpcode() == Instruction::Add) {
6983 if (!RetI->hasOneUser())
6984 return None;
6985 RetI = RetI->user_back();
6986 }
6987
6988 // Test if the found instruction is a reduction, and if not return an invalid
6989 // cost specifying that the parent should use the original cost modelling.
6990 if (!InLoopReductionImmediateChains.count(RetI))
6991 return None;
6992
6993 // Find the reduction this chain is a part of and calculate the basic cost of
6994 // the reduction on its own.
6995 Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6996 Instruction *ReductionPhi = LastChain;
6997 while (!isa<PHINode>(ReductionPhi))
6998 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6999
7000 const RecurrenceDescriptor &RdxDesc =
7001 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
7002
7003 InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7004 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
7005
7006 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
7007 // normal fmul instruction to the cost of the fadd reduction.
7008 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
7009 BaseCost +=
7010 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
7011
7012 // If we're using ordered reductions then we can just return the base cost
7013 // here, since getArithmeticReductionCost calculates the full ordered
7014 // reduction cost when FP reassociation is not allowed.
7015 if (useOrderedReductions(RdxDesc))
7016 return BaseCost;
7017
7018 // Get the operand that was not the reduction chain and match it to one of the
7019 // patterns, returning the better cost if it is found.
7020 Instruction *RedOp = RetI->getOperand(1) == LastChain
7021 ? dyn_cast<Instruction>(RetI->getOperand(0))
7022 : dyn_cast<Instruction>(RetI->getOperand(1));
7023
7024 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7025
7026 Instruction *Op0, *Op1;
7027 if (RedOp &&
7028 match(RedOp,
7029 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
7030 match(Op0, m_ZExtOrSExt(m_Value())) &&
7031 Op0->getOpcode() == Op1->getOpcode() &&
7032 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7033 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
7034 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
7035
7036 // Matched reduce(ext(mul(ext(A), ext(B)))).
7037 // Note that the extend opcodes need to all match, or if A==B they will have
7038 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
7039 // which is equally fine.
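// An illustrative instance (types assumed for the example):
//   %a.e = sext i8 %a to i16
//   %b.e = sext i8 %b to i16
//   %m = mul i16 %a.e, %b.e
//   %m.e = sext i16 %m to i32
//   %add = add i32 %sum, %m.e
// All three extends are sext here, so the chain matches; with A == B the
// inner extends may have been canonicalized to zext(mul(sext, sext)), which
// the check above still accepts.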
7040 bool IsUnsigned = isa<ZExtInst>(Op0); 7041 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7042 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 7043 7044 InstructionCost ExtCost = 7045 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 7046 TTI::CastContextHint::None, CostKind, Op0); 7047 InstructionCost MulCost = 7048 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 7049 InstructionCost Ext2Cost = 7050 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 7051 TTI::CastContextHint::None, CostKind, RedOp); 7052 7053 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7054 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7055 CostKind); 7056 7057 if (RedCost.isValid() && 7058 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 7059 return I == RetI ? RedCost : 0; 7060 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7061 !TheLoop->isLoopInvariant(RedOp)) { 7062 // Matched reduce(ext(A)) 7063 bool IsUnsigned = isa<ZExtInst>(RedOp); 7064 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7065 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7066 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7067 CostKind); 7068 7069 InstructionCost ExtCost = 7070 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7071 TTI::CastContextHint::None, CostKind, RedOp); 7072 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7073 return I == RetI ? RedCost : 0; 7074 } else if (RedOp && 7075 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7076 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7077 Op0->getOpcode() == Op1->getOpcode() && 7078 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7079 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7080 bool IsUnsigned = isa<ZExtInst>(Op0); 7081 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7082 // Matched reduce(mul(ext, ext)) 7083 InstructionCost ExtCost = 7084 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7085 TTI::CastContextHint::None, CostKind, Op0); 7086 InstructionCost MulCost = 7087 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7088 7089 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7090 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7091 CostKind); 7092 7093 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7094 return I == RetI ? RedCost : 0; 7095 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 7096 // Matched reduce(mul()) 7097 InstructionCost MulCost = 7098 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7099 7100 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7101 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7102 CostKind); 7103 7104 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7105 return I == RetI ? RedCost : 0; 7106 } 7107 } 7108 7109 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7110 } 7111 7112 InstructionCost 7113 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7114 ElementCount VF) { 7115 // Calculate scalar cost only. Vectorization cost should be ready at this 7116 // moment. 
7117 if (VF.isScalar()) { 7118 Type *ValTy = getLoadStoreType(I); 7119 const Align Alignment = getLoadStoreAlignment(I); 7120 unsigned AS = getLoadStoreAddressSpace(I); 7121 7122 return TTI.getAddressComputationCost(ValTy) + 7123 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7124 TTI::TCK_RecipThroughput, I); 7125 } 7126 return getWideningCost(I, VF); 7127 } 7128 7129 LoopVectorizationCostModel::VectorizationCostTy 7130 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7131 ElementCount VF) { 7132 // If we know that this instruction will remain uniform, check the cost of 7133 // the scalar version. 7134 if (isUniformAfterVectorization(I, VF)) 7135 VF = ElementCount::getFixed(1); 7136 7137 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7138 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7139 7140 // Forced scalars do not have any scalarization overhead. 7141 auto ForcedScalar = ForcedScalars.find(VF); 7142 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7143 auto InstSet = ForcedScalar->second; 7144 if (InstSet.count(I)) 7145 return VectorizationCostTy( 7146 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7147 VF.getKnownMinValue()), 7148 false); 7149 } 7150 7151 Type *VectorTy; 7152 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7153 7154 bool TypeNotScalarized = false; 7155 if (VF.isVector() && VectorTy->isVectorTy()) { 7156 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 7157 if (NumParts) 7158 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 7159 else 7160 C = InstructionCost::getInvalid(); 7161 } 7162 return VectorizationCostTy(C, TypeNotScalarized); 7163 } 7164 7165 InstructionCost 7166 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7167 ElementCount VF) const { 7168 7169 // There is no mechanism yet to create a scalable scalarization loop, 7170 // so this is currently Invalid. 7171 if (VF.isScalable()) 7172 return InstructionCost::getInvalid(); 7173 7174 if (VF.isScalar()) 7175 return 0; 7176 7177 InstructionCost Cost = 0; 7178 Type *RetTy = ToVectorTy(I->getType(), VF); 7179 if (!RetTy->isVoidTy() && 7180 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7181 Cost += TTI.getScalarizationOverhead( 7182 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7183 false); 7184 7185 // Some targets keep addresses scalar. 7186 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7187 return Cost; 7188 7189 // Some targets support efficient element stores. 7190 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7191 return Cost; 7192 7193 // Collect operands to consider. 7194 CallInst *CI = dyn_cast<CallInst>(I); 7195 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 7196 7197 // Skip operands that do not require extraction/scalarization and do not incur 7198 // any overhead. 7199 SmallVector<Type *> Tys; 7200 for (auto *V : filterExtractingOperands(Ops, VF)) 7201 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7202 return Cost + TTI.getOperandsScalarizationOverhead( 7203 filterExtractingOperands(Ops, VF), Tys); 7204 } 7205 7206 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7207 if (VF.isScalar()) 7208 return; 7209 NumPredStores = 0; 7210 for (BasicBlock *BB : TheLoop->blocks()) { 7211 // For each instruction in the old loop. 
7212 for (Instruction &I : *BB) {
7213 Value *Ptr = getLoadStorePointerOperand(&I);
7214 if (!Ptr)
7215 continue;
7216
7217 // TODO: We should generate better code and update the cost model for
7218 // predicated uniform stores. Today they are treated as any other
7219 // predicated store (see added test cases in
7220 // invariant-store-vectorization.ll).
7221 if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7222 NumPredStores++;
7223
7224 if (Legal->isUniformMemOp(I)) {
7225 // TODO: Avoid replicating loads and stores instead of
7226 // relying on instcombine to remove them.
7227 // Load: Scalar load + broadcast
7228 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7229 InstructionCost Cost;
7230 if (isa<StoreInst>(&I) && VF.isScalable() &&
7231 isLegalGatherOrScatter(&I)) {
7232 Cost = getGatherScatterCost(&I, VF);
7233 setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7234 } else {
7235 assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7236 "Cannot yet scalarize uniform stores");
7237 Cost = getUniformMemOpCost(&I, VF);
7238 setWideningDecision(&I, VF, CM_Scalarize, Cost);
7239 }
7240 continue;
7241 }
7242
7243 // We assume that widening is the best solution when possible.
7244 if (memoryInstructionCanBeWidened(&I, VF)) {
7245 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7246 int ConsecutiveStride = Legal->isConsecutivePtr(
7247 getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7248 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7249 "Expected consecutive stride.");
7250 InstWidening Decision =
7251 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7252 setWideningDecision(&I, VF, Decision, Cost);
7253 continue;
7254 }
7255
7256 // Choose between Interleaving, Gather/Scatter or Scalarization.
7257 InstructionCost InterleaveCost = InstructionCost::getInvalid();
7258 unsigned NumAccesses = 1;
7259 if (isAccessInterleaved(&I)) {
7260 auto Group = getInterleavedAccessGroup(&I);
7261 assert(Group && "Failed to get an interleaved access group.");
7262
7263 // Make one decision for the whole group.
7264 if (getWideningDecision(&I, VF) != CM_Unknown)
7265 continue;
7266
7267 NumAccesses = Group->getNumMembers();
7268 if (interleavedAccessCanBeWidened(&I, VF))
7269 InterleaveCost = getInterleaveGroupCost(&I, VF);
7270 }
7271
7272 InstructionCost GatherScatterCost =
7273 isLegalGatherOrScatter(&I)
7274 ? getGatherScatterCost(&I, VF) * NumAccesses
7275 : InstructionCost::getInvalid();
7276
7277 InstructionCost ScalarizationCost =
7278 getMemInstScalarizationCost(&I, VF) * NumAccesses;
7279
7280 // Choose the better solution for the current VF, record this decision,
7281 // and use it during vectorization.
7282 InstructionCost Cost;
7283 InstWidening Decision;
7284 if (InterleaveCost <= GatherScatterCost &&
7285 InterleaveCost < ScalarizationCost) {
7286 Decision = CM_Interleave;
7287 Cost = InterleaveCost;
7288 } else if (GatherScatterCost < ScalarizationCost) {
7289 Decision = CM_GatherScatter;
7290 Cost = GatherScatterCost;
7291 } else {
7292 Decision = CM_Scalarize;
7293 Cost = ScalarizationCost;
7294 }
7295 // If the instruction belongs to an interleave group, the whole group
7296 // receives the same decision. The whole group receives the cost, but
7297 // the cost will actually be assigned to one instruction.
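// E.g., for a factor-2 group { load A, load B } both members are marked
// CM_Interleave below, but only the group's insert-position member carries
// the interleave cost; the other member reports 0 so the group is not
// double-counted (a sketch of the intended accounting).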
7298 if (auto Group = getInterleavedAccessGroup(&I))
7299 setWideningDecision(Group, VF, Decision, Cost);
7300 else
7301 setWideningDecision(&I, VF, Decision, Cost);
7302 }
7303 }
7304
7305 // Make sure that any load of address and any other address computation
7306 // remains scalar unless there is gather/scatter support. This avoids
7307 // inevitable extracts into address registers, and also has the benefit of
7308 // activating LSR more, since that pass can't optimize vectorized
7309 // addresses.
7310 if (TTI.prefersVectorizedAddressing())
7311 return;
7312
7313 // Start with all scalar pointer uses.
7314 SmallPtrSet<Instruction *, 8> AddrDefs;
7315 for (BasicBlock *BB : TheLoop->blocks())
7316 for (Instruction &I : *BB) {
7317 Instruction *PtrDef =
7318 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7319 if (PtrDef && TheLoop->contains(PtrDef) &&
7320 getWideningDecision(&I, VF) != CM_GatherScatter)
7321 AddrDefs.insert(PtrDef);
7322 }
7323
7324 // Add all instructions used to generate the addresses.
7325 SmallVector<Instruction *, 4> Worklist;
7326 append_range(Worklist, AddrDefs);
7327 while (!Worklist.empty()) {
7328 Instruction *I = Worklist.pop_back_val();
7329 for (auto &Op : I->operands())
7330 if (auto *InstOp = dyn_cast<Instruction>(Op))
7331 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7332 AddrDefs.insert(InstOp).second)
7333 Worklist.push_back(InstOp);
7334 }
7335
7336 for (auto *I : AddrDefs) {
7337 if (isa<LoadInst>(I)) {
7338 // Setting the desired widening decision should ideally be handled by
7339 // cost functions, but since this involves the task of finding out
7340 // if the loaded register is involved in an address computation, it is
7341 // instead changed here when we know this is the case.
7342 InstWidening Decision = getWideningDecision(I, VF);
7343 if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7344 // Scalarize a widened load of address.
7345 setWideningDecision(
7346 I, VF, CM_Scalarize,
7347 (VF.getKnownMinValue() *
7348 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7349 else if (auto Group = getInterleavedAccessGroup(I)) {
7350 // Scalarize an interleave group of address loads.
7351 for (unsigned I = 0; I < Group->getFactor(); ++I) {
7352 if (Instruction *Member = Group->getMember(I))
7353 setWideningDecision(
7354 Member, VF, CM_Scalarize,
7355 (VF.getKnownMinValue() *
7356 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7357 }
7358 }
7359 } else
7360 // Make sure I gets scalarized, and gets a cost estimate without
7361 // scalarization overhead.
7362 ForcedScalars[VF].insert(I);
7363 }
7364 }
7365
7366 InstructionCost
7367 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7368 Type *&VectorTy) {
7369 Type *RetTy = I->getType();
7370 if (canTruncateToMinimalBitwidth(I, VF))
7371 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7372 auto SE = PSE.getSE();
7373 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7374
7375 auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7376 ElementCount VF) -> bool {
7377 if (VF.isScalar())
7378 return true;
7379
7380 auto Scalarized = InstsToScalarize.find(VF);
7381 assert(Scalarized != InstsToScalarize.end() &&
7382 "VF not yet analyzed for scalarization profitability");
7383 return !Scalarized->second.count(I) &&
7384 llvm::all_of(I->users(), [&](User *U) {
7385 auto *UI = cast<Instruction>(U);
7386 return !Scalarized->second.count(UI);
7387 });
7388 };
7389 (void) hasSingleCopyAfterVectorization;
7390
7391 if (isScalarAfterVectorization(I, VF)) {
7392 // With the exception of GEPs and PHIs, after scalarization there should
7393 // only be one copy of the instruction generated in the loop. This is
7394 // because the VF is either 1, or any instructions that need scalarizing
7395 // have already been dealt with by the time we get here. As a result,
7396 // we don't have to multiply the instruction cost by VF.
7397 assert(I->getOpcode() == Instruction::GetElementPtr ||
7398 I->getOpcode() == Instruction::PHI ||
7399 (I->getOpcode() == Instruction::BitCast &&
7400 I->getType()->isPointerTy()) ||
7401 hasSingleCopyAfterVectorization(I, VF));
7402 VectorTy = RetTy;
7403 } else
7404 VectorTy = ToVectorTy(RetTy, VF);
7405
7406 // TODO: We need to estimate the cost of intrinsic calls.
7407 switch (I->getOpcode()) {
7408 case Instruction::GetElementPtr:
7409 // We mark this instruction as zero-cost because the cost of GEPs in
7410 // vectorized code depends on whether the corresponding memory instruction
7411 // is scalarized or not. Therefore, we handle GEPs with the memory
7412 // instruction cost.
7413 return 0;
7414 case Instruction::Br: {
7415 // In cases of scalarized and predicated instructions, there will be VF
7416 // predicated blocks in the vectorized loop. Each branch around these
7417 // blocks also requires an extract of its vector compare i1 element.
7418 bool ScalarPredicatedBB = false;
7419 BranchInst *BI = cast<BranchInst>(I);
7420 if (VF.isVector() && BI->isConditional() &&
7421 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7422 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7423 ScalarPredicatedBB = true;
7424
7425 if (ScalarPredicatedBB) {
7426 // Not possible to scalarize a scalable vector with predicated instructions.
7427 if (VF.isScalable())
7428 return InstructionCost::getInvalid();
7429 // Return cost for branches around scalarized and predicated blocks.
7430 auto *Vec_i1Ty =
7431 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7432 return (
7433 TTI.getScalarizationOverhead(
7434 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7435 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7436 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7437 // The back-edge branch will remain, as will all scalar branches.
7438 return TTI.getCFInstrCost(Instruction::Br, CostKind);
7439 else
7440 // This branch will be eliminated by if-conversion.
7441 return 0; 7442 // Note: We currently assume zero cost for an unconditional branch inside 7443 // a predicated block since it will become a fall-through, although we 7444 // may decide in the future to call TTI for all branches. 7445 } 7446 case Instruction::PHI: { 7447 auto *Phi = cast<PHINode>(I); 7448 7449 // First-order recurrences are replaced by vector shuffles inside the loop. 7450 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7451 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7452 return TTI.getShuffleCost( 7453 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7454 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7455 7456 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7457 // converted into select instructions. We require N - 1 selects per phi 7458 // node, where N is the number of incoming values. 7459 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7460 return (Phi->getNumIncomingValues() - 1) * 7461 TTI.getCmpSelInstrCost( 7462 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7463 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7464 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7465 7466 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7467 } 7468 case Instruction::UDiv: 7469 case Instruction::SDiv: 7470 case Instruction::URem: 7471 case Instruction::SRem: 7472 // If we have a predicated instruction, it may not be executed for each 7473 // vector lane. Get the scalarization cost and scale this amount by the 7474 // probability of executing the predicated block. If the instruction is not 7475 // predicated, we fall through to the next case. 7476 if (VF.isVector() && isScalarWithPredication(I)) { 7477 InstructionCost Cost = 0; 7478 7479 // These instructions have a non-void type, so account for the phi nodes 7480 // that we will create. This cost is likely to be zero. The phi node 7481 // cost, if any, should be scaled by the block probability because it 7482 // models a copy at the end of each predicated block. 7483 Cost += VF.getKnownMinValue() * 7484 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7485 7486 // The cost of the non-predicated instruction. 7487 Cost += VF.getKnownMinValue() * 7488 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7489 7490 // The cost of insertelement and extractelement instructions needed for 7491 // scalarization. 7492 Cost += getScalarizationOverhead(I, VF); 7493 7494 // Scale the cost by the probability of executing the predicated blocks. 7495 // This assumes the predicated block for each vector lane is equally 7496 // likely. 7497 return Cost / getReciprocalPredBlockProb(); 7498 } 7499 LLVM_FALLTHROUGH; 7500 case Instruction::Add: 7501 case Instruction::FAdd: 7502 case Instruction::Sub: 7503 case Instruction::FSub: 7504 case Instruction::Mul: 7505 case Instruction::FMul: 7506 case Instruction::FDiv: 7507 case Instruction::FRem: 7508 case Instruction::Shl: 7509 case Instruction::LShr: 7510 case Instruction::AShr: 7511 case Instruction::And: 7512 case Instruction::Or: 7513 case Instruction::Xor: { 7514 // Since we will replace the stride by 1 the multiplication should go away. 
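// E.g. (illustrative names): if the loop was versioned on "%stride == 1",
// an index computation such as "%i.scaled = mul i64 %i, %stride" folds to
// "%i" in the versioned loop, so the mul is costed at 0 below.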
7515 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7516 return 0;
7517
7518 // Detect reduction patterns
7519 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7520 return *RedCost;
7521
7522 // Certain instructions can be cheaper to vectorize if they have a constant
7523 // second vector operand. One example of this is shifts on x86.
7524 Value *Op2 = I->getOperand(1);
7525 TargetTransformInfo::OperandValueProperties Op2VP;
7526 TargetTransformInfo::OperandValueKind Op2VK =
7527 TTI.getOperandInfo(Op2, Op2VP);
7528 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7529 Op2VK = TargetTransformInfo::OK_UniformValue;
7530
7531 SmallVector<const Value *, 4> Operands(I->operand_values());
7532 return TTI.getArithmeticInstrCost(
7533 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7534 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7535 }
7536 case Instruction::FNeg: {
7537 return TTI.getArithmeticInstrCost(
7538 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7539 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7540 TargetTransformInfo::OP_None, I->getOperand(0), I);
7541 }
7542 case Instruction::Select: {
7543 SelectInst *SI = cast<SelectInst>(I);
7544 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7545 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7546
7547 const Value *Op0, *Op1;
7548 using namespace llvm::PatternMatch;
7549 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7550 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7551 // select x, y, false --> x & y
7552 // select x, true, y --> x | y
7553 TTI::OperandValueProperties Op1VP = TTI::OP_None;
7554 TTI::OperandValueProperties Op2VP = TTI::OP_None;
7555 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7556 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7557 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7558 Op1->getType()->getScalarSizeInBits() == 1);
7559
7560 SmallVector<const Value *, 2> Operands{Op0, Op1};
7561 return TTI.getArithmeticInstrCost(
7562 match(I, m_LogicalOr()) ?
Instruction::Or : Instruction::And, VectorTy, 7563 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7564 } 7565 7566 Type *CondTy = SI->getCondition()->getType(); 7567 if (!ScalarCond) 7568 CondTy = VectorType::get(CondTy, VF); 7569 7570 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7571 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7572 Pred = Cmp->getPredicate(); 7573 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7574 CostKind, I); 7575 } 7576 case Instruction::ICmp: 7577 case Instruction::FCmp: { 7578 Type *ValTy = I->getOperand(0)->getType(); 7579 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7580 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7581 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7582 VectorTy = ToVectorTy(ValTy, VF); 7583 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7584 cast<CmpInst>(I)->getPredicate(), CostKind, 7585 I); 7586 } 7587 case Instruction::Store: 7588 case Instruction::Load: { 7589 ElementCount Width = VF; 7590 if (Width.isVector()) { 7591 InstWidening Decision = getWideningDecision(I, Width); 7592 assert(Decision != CM_Unknown && 7593 "CM decision should be taken at this point"); 7594 if (Decision == CM_Scalarize) 7595 Width = ElementCount::getFixed(1); 7596 } 7597 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7598 return getMemoryInstructionCost(I, VF); 7599 } 7600 case Instruction::BitCast: 7601 if (I->getType()->isPointerTy()) 7602 return 0; 7603 LLVM_FALLTHROUGH; 7604 case Instruction::ZExt: 7605 case Instruction::SExt: 7606 case Instruction::FPToUI: 7607 case Instruction::FPToSI: 7608 case Instruction::FPExt: 7609 case Instruction::PtrToInt: 7610 case Instruction::IntToPtr: 7611 case Instruction::SIToFP: 7612 case Instruction::UIToFP: 7613 case Instruction::Trunc: 7614 case Instruction::FPTrunc: { 7615 // Computes the CastContextHint from a Load/Store instruction. 7616 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7617 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7618 "Expected a load or a store!"); 7619 7620 if (VF.isScalar() || !TheLoop->contains(I)) 7621 return TTI::CastContextHint::Normal; 7622 7623 switch (getWideningDecision(I, VF)) { 7624 case LoopVectorizationCostModel::CM_GatherScatter: 7625 return TTI::CastContextHint::GatherScatter; 7626 case LoopVectorizationCostModel::CM_Interleave: 7627 return TTI::CastContextHint::Interleave; 7628 case LoopVectorizationCostModel::CM_Scalarize: 7629 case LoopVectorizationCostModel::CM_Widen: 7630 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7631 : TTI::CastContextHint::Normal; 7632 case LoopVectorizationCostModel::CM_Widen_Reverse: 7633 return TTI::CastContextHint::Reversed; 7634 case LoopVectorizationCostModel::CM_Unknown: 7635 llvm_unreachable("Instr did not go through cost modelling?"); 7636 } 7637 7638 llvm_unreachable("Unhandled case!"); 7639 }; 7640 7641 unsigned Opcode = I->getOpcode(); 7642 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7643 // For Trunc, the context is the only user, which must be a StoreInst. 7644 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7645 if (I->hasOneUse()) 7646 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7647 CCH = ComputeCCH(Store); 7648 } 7649 // For Z/Sext, the context is the operand, which must be a LoadInst. 
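// E.g. (illustrative): for "%e = zext i8 %l to i32" where "%l = load i8 ..."
// received a CM_Widen_Reverse decision, the cast is costed with a Reversed
// context hint, letting targets price the combined reversed load+extend
// accurately.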
7650 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7651 Opcode == Instruction::FPExt) {
7652 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7653 CCH = ComputeCCH(Load);
7654 }
7655
7656 // We optimize the truncation of induction variables having constant
7657 // integer steps. The cost of these truncations is the same as the scalar
7658 // operation.
7659 if (isOptimizableIVTruncate(I, VF)) {
7660 auto *Trunc = cast<TruncInst>(I);
7661 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7662 Trunc->getSrcTy(), CCH, CostKind, Trunc);
7663 }
7664
7665 // Detect reduction patterns
7666 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7667 return *RedCost;
7668
7669 Type *SrcScalarTy = I->getOperand(0)->getType();
7670 Type *SrcVecTy =
7671 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7672 if (canTruncateToMinimalBitwidth(I, VF)) {
7673 // This cast is going to be shrunk. This may remove the cast or it might
7674 // turn it into a slightly different cast. For example, if MinBW == 16,
7675 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7676 //
7677 // Calculate the modified src and dest types.
7678 Type *MinVecTy = VectorTy;
7679 if (Opcode == Instruction::Trunc) {
7680 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7681 VectorTy =
7682 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7683 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7684 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7685 VectorTy =
7686 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7687 }
7688 }
7689
7690 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7691 }
7692 case Instruction::Call: {
7693 if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7694 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7695 return *RedCost;
7696 bool NeedToScalarize;
7697 CallInst *CI = cast<CallInst>(I);
7698 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7699 if (getVectorIntrinsicIDForCall(CI, TLI)) {
7700 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7701 return std::min(CallCost, IntrinsicCost);
7702 }
7703 return CallCost;
7704 }
7705 case Instruction::ExtractValue:
7706 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7707 case Instruction::Alloca:
7708 // We cannot easily widen alloca to a scalable alloca, as
7709 // the result would need to be a vector of pointers.
7710 if (VF.isScalable())
7711 return InstructionCost::getInvalid();
7712 LLVM_FALLTHROUGH;
7713 default:
7714 // This opcode is unknown. Assume that it is the same as 'mul'.
7715 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7716 } // end of switch.
7717 } 7718 7719 char LoopVectorize::ID = 0; 7720 7721 static const char lv_name[] = "Loop Vectorization"; 7722 7723 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7724 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7725 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7726 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7727 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7728 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7729 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7730 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7731 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7732 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7733 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7734 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7735 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7736 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7737 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7738 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7739 7740 namespace llvm { 7741 7742 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7743 7744 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7745 bool VectorizeOnlyWhenForced) { 7746 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7747 } 7748 7749 } // end namespace llvm 7750 7751 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7752 // Check if the pointer operand of a load or store instruction is 7753 // consecutive. 7754 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7755 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7756 return false; 7757 } 7758 7759 void LoopVectorizationCostModel::collectValuesToIgnore() { 7760 // Ignore ephemeral values. 7761 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7762 7763 // Ignore type-promoting instructions we identified during reduction 7764 // detection. 7765 for (auto &Reduction : Legal->getReductionVars()) { 7766 const RecurrenceDescriptor &RedDes = Reduction.second; 7767 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7768 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7769 } 7770 // Ignore type-casting instructions we identified during induction 7771 // detection. 7772 for (auto &Induction : Legal->getInductionVars()) { 7773 const InductionDescriptor &IndDes = Induction.second; 7774 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7775 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7776 } 7777 } 7778 7779 void LoopVectorizationCostModel::collectInLoopReductions() { 7780 for (auto &Reduction : Legal->getReductionVars()) { 7781 PHINode *Phi = Reduction.first; 7782 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7783 7784 // We don't collect reductions that are type promoted (yet). 7785 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7786 continue; 7787 7788 // If the target would prefer this reduction to happen "in-loop", then we 7789 // want to record it as such. 7790 unsigned Opcode = RdxDesc.getOpcode(); 7791 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7792 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7793 TargetTransformInfo::ReductionFlags())) 7794 continue; 7795 7796 // Check that we can correctly put the reductions into the loop, by 7797 // finding the chain of operations that leads from the phi to the loop 7798 // exit value. 
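// For a simple in-loop integer add reduction, e.g. (names illustrative):
//   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %loop ]
//   %sum.next = add i32 %sum, %x
// the returned chain would just be { %sum.next }.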
7799 SmallVector<Instruction *, 4> ReductionOperations =
7800 RdxDesc.getReductionOpChain(Phi, TheLoop);
7801 bool InLoop = !ReductionOperations.empty();
7802 if (InLoop) {
7803 InLoopReductionChains[Phi] = ReductionOperations;
7804 // Add the elements to InLoopReductionImmediateChains for cost modelling.
7805 Instruction *LastChain = Phi;
7806 for (auto *I : ReductionOperations) {
7807 InLoopReductionImmediateChains[I] = LastChain;
7808 LastChain = I;
7809 }
7810 }
7811 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7812 << " reduction for phi: " << *Phi << "\n");
7813 }
7814 }
7815
7816 // TODO: we could return a pair of values that specify the max VF and
7817 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7818 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7819 // doesn't have a cost model that can choose which plan to execute if
7820 // more than one is generated.
7821 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7822 LoopVectorizationCostModel &CM) {
7823 unsigned WidestType;
7824 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7825 return WidestVectorRegBits / WidestType;
7826 }
7827
7828 VectorizationFactor
7829 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7830 assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7831 ElementCount VF = UserVF;
7832 // Outer loop handling: outer loops may require CFG and instruction-level
7833 // transformations before even evaluating whether vectorization is profitable.
7834 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7835 // the vectorization pipeline.
7836 if (!OrigLoop->isInnermost()) {
7837 // If the user doesn't provide a vectorization factor, determine a
7838 // reasonable one.
7839 if (UserVF.isZero()) {
7840 VF = ElementCount::getFixed(determineVPlanVF(
7841 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7842 .getFixedSize(),
7843 CM));
7844 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7845
7846 // Make sure we have a VF > 1 for stress testing.
7847 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7848 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7849 << "overriding computed VF.\n");
7850 VF = ElementCount::getFixed(4);
7851 }
7852 }
7853 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7854 assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7855 "VF needs to be a power of two");
7856 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7857 << "VF " << VF << " to build VPlans.\n");
7858 buildVPlans(VF, VF);
7859
7860 // For VPlan build stress testing, we bail out after VPlan construction.
7861 if (VPlanBuildStressTest)
7862 return VectorizationFactor::Disabled();
7863
7864 return {VF, 0 /*Cost*/};
7865 }
7866
7867 LLVM_DEBUG(
7868 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7869 "VPlan-native path.\n");
7870 return VectorizationFactor::Disabled();
7871 }
7872
7873 Optional<VectorizationFactor>
7874 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7875 assert(OrigLoop->isInnermost() && "Inner loop expected.");
7876 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7877 if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7878 return None;
7879
7880 // Invalidate interleave groups if all blocks of the loop will be predicated.
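// (When the tail is folded by masking, every memory access in the loop
// becomes a masked access, so interleave groups can only be kept if the
// target supports masked interleaved accesses.)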
7881 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7882 !useMaskedInterleavedAccesses(*TTI)) { 7883 LLVM_DEBUG( 7884 dbgs() 7885 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7886 "which requires masked-interleaved support.\n"); 7887 if (CM.InterleaveInfo.invalidateGroups()) 7888 // Invalidating interleave groups also requires invalidating all decisions 7889 // based on them, which includes widening decisions and uniform and scalar 7890 // values. 7891 CM.invalidateCostModelingDecisions(); 7892 } 7893 7894 ElementCount MaxUserVF = 7895 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7896 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7897 if (!UserVF.isZero() && UserVFIsLegal) { 7898 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7899 "VF needs to be a power of two"); 7900 // Collect the instructions (and their associated costs) that will be more 7901 // profitable to scalarize. 7902 if (CM.selectUserVectorizationFactor(UserVF)) { 7903 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7904 CM.collectInLoopReductions(); 7905 buildVPlansWithVPRecipes(UserVF, UserVF); 7906 LLVM_DEBUG(printPlans(dbgs())); 7907 return {{UserVF, 0}}; 7908 } else 7909 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7910 "InvalidCost", ORE, OrigLoop); 7911 } 7912 7913 // Populate the set of Vectorization Factor Candidates. 7914 ElementCountSet VFCandidates; 7915 for (auto VF = ElementCount::getFixed(1); 7916 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7917 VFCandidates.insert(VF); 7918 for (auto VF = ElementCount::getScalable(1); 7919 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7920 VFCandidates.insert(VF); 7921 7922 for (const auto &VF : VFCandidates) { 7923 // Collect Uniform and Scalar instructions after vectorization with VF. 7924 CM.collectUniformsAndScalars(VF); 7925 7926 // Collect the instructions (and their associated costs) that will be more 7927 // profitable to scalarize. 7928 if (VF.isVector()) 7929 CM.collectInstsToScalarize(VF); 7930 } 7931 7932 CM.collectInLoopReductions(); 7933 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7934 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7935 7936 LLVM_DEBUG(printPlans(dbgs())); 7937 if (!MaxFactors.hasVector()) 7938 return VectorizationFactor::Disabled(); 7939 7940 // Select the optimal vectorization factor. 7941 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7942 7943 // Check if it is profitable to vectorize with runtime checks. 
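// E.g., assuming the default runtime memory-check threshold of 8: a loop
// whose legality analysis requires 9 pointer-overlap checks would be
// rejected below unless reordering was explicitly allowed; the (higher)
// pragma threshold is checked separately.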
7944 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7945 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7946 bool PragmaThresholdReached =
7947 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7948 bool ThresholdReached =
7949 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7950 if ((ThresholdReached && !Hints.allowReordering()) ||
7951 PragmaThresholdReached) {
7952 ORE->emit([&]() {
7953 return OptimizationRemarkAnalysisAliasing(
7954 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7955 OrigLoop->getHeader())
7956 << "loop not vectorized: cannot prove it is safe to reorder "
7957 "memory operations";
7958 });
7959 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7960 Hints.emitRemarkWithHints();
7961 return VectorizationFactor::Disabled();
7962 }
7963 }
7964 return SelectedVF;
7965 }
7966
7967 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7968 assert(count_if(VPlans,
7969 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7970 1 &&
7971 "Best VF does not have a single VPlan.");
7972
7973 for (const VPlanPtr &Plan : VPlans) {
7974 if (Plan->hasVF(VF))
7975 return *Plan.get();
7976 }
7977 llvm_unreachable("No plan found!");
7978 }
7979
7980 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7981 VPlan &BestVPlan,
7982 InnerLoopVectorizer &ILV,
7983 DominatorTree *DT) {
7984 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7985 << '\n');
7986
7987 // Perform the actual loop transformation.
7988
7989 // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7990 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7991 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7992 State.TripCount = ILV.getOrCreateTripCount(nullptr);
7993 State.CanonicalIV = ILV.Induction;
7994 ILV.collectPoisonGeneratingRecipes(State);
7995
7996 ILV.printDebugTracesAtStart();
7997
7998 //===------------------------------------------------===//
7999 //
8000 // Notice: any optimization or new instruction that goes
8001 // into the code below should also be implemented in
8002 // the cost-model.
8003 //
8004 //===------------------------------------------------===//
8005
8006 // 2. Copy and widen instructions from the old loop into the new loop.
8007 BestVPlan.execute(&State);
8008
8009 // 3. Fix the vectorized code: take care of header phi's, live-outs,
8010 // predication, updating analyses.
8011 ILV.fixVectorizedLoop(State);
8012
8013 ILV.printDebugTracesAtEnd();
8014 }
8015
8016 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8017 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8018 for (const auto &Plan : VPlans)
8019 if (PrintVPlansInDotFormat)
8020 Plan->printDOT(O);
8021 else
8022 Plan->print(O);
8023 }
8024 #endif
8025
8026 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8027 SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8028
8029 // We create new control-flow for the vectorized loop, so the original exit
8030 // conditions will be dead after vectorization if they are only used by the
8031 // terminator.
8032 SmallVector<BasicBlock*> ExitingBlocks;
8033 OrigLoop->getExitingBlocks(ExitingBlocks);
8034 for (auto *BB : ExitingBlocks) {
8035 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8036 if (!Cmp || !Cmp->hasOneUse())
8037 continue;
8038
8039 // TODO: we should introduce a getUniqueExitingBlocks on Loop
8040 if (!DeadInstructions.insert(Cmp).second)
8041 continue;
8042
8043 // An operand of the icmp is often a dead trunc, used by IndUpdate.
8044 // TODO: can recurse through operands in general
8045 for (Value *Op : Cmp->operands()) {
8046 if (isa<TruncInst>(Op) && Op->hasOneUse())
8047 DeadInstructions.insert(cast<Instruction>(Op));
8048 }
8049 }
8050
8051 // We create new "steps" for induction variable updates to which the original
8052 // induction variables map. An original update instruction will be dead if
8053 // all its users except the induction variable are dead.
8054 auto *Latch = OrigLoop->getLoopLatch();
8055 for (auto &Induction : Legal->getInductionVars()) {
8056 PHINode *Ind = Induction.first;
8057 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8058
8059 // If the tail is to be folded by masking, the primary induction variable,
8060 // if it exists, isn't dead: it will be used for masking. Don't kill it.
8061 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8062 continue;
8063
8064 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8065 return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8066 }))
8067 DeadInstructions.insert(IndUpdate);
8068
8069 // We record as "Dead" also the type-casting instructions we had identified
8070 // during induction analysis. We don't need any handling for them in the
8071 // vectorized loop because we have proven that, under a proper runtime
8072 // test guarding the vectorized loop, the value of the phi, and the casted
8073 // value of the phi, are the same. The last instruction in this casting chain
8074 // will get its scalar/vector/widened def from the scalar/vector/widened def
8075 // of the respective phi node. Any other casts in the induction def-use chain
8076 // have no other uses outside the phi update chain, and will be ignored.
8077 const InductionDescriptor &IndDes = Induction.second;
8078 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8079 DeadInstructions.insert(Casts.begin(), Casts.end());
8080 }
8081 }
8082
8083 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8084
8085 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8086
8087 Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx,
8088 Value *Step,
8089 Instruction::BinaryOps BinOp) {
8090 // When unrolling and the VF is 1, we only need to add a simple scalar.
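// E.g. (a sketch): with VF = 1, UF = 4 and an integer step of 1, the four
// unrolled parts receive Val + 0, Val + 1, Val + 2 and Val + 3, where
// StartIdx carries the per-part offset.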
8091 Type *Ty = Val->getType(); 8092 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8093 8094 if (Ty->isFloatingPointTy()) { 8095 // Floating-point operations inherit FMF via the builder's flags. 8096 Value *MulOp = Builder.CreateFMul(StartIdx, Step); 8097 return Builder.CreateBinOp(BinOp, Val, MulOp); 8098 } 8099 return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction"); 8100 } 8101 8102 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8103 SmallVector<Metadata *, 4> MDs; 8104 // Reserve first location for self reference to the LoopID metadata node. 8105 MDs.push_back(nullptr); 8106 bool IsUnrollMetadata = false; 8107 MDNode *LoopID = L->getLoopID(); 8108 if (LoopID) { 8109 // First find existing loop unrolling disable metadata. 8110 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8111 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8112 if (MD) { 8113 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8114 IsUnrollMetadata = 8115 S && S->getString().startswith("llvm.loop.unroll.disable"); 8116 } 8117 MDs.push_back(LoopID->getOperand(i)); 8118 } 8119 } 8120 8121 if (!IsUnrollMetadata) { 8122 // Add runtime unroll disable metadata. 8123 LLVMContext &Context = L->getHeader()->getContext(); 8124 SmallVector<Metadata *, 1> DisableOperands; 8125 DisableOperands.push_back( 8126 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8127 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8128 MDs.push_back(DisableNode); 8129 MDNode *NewLoopID = MDNode::get(Context, MDs); 8130 // Set operand 0 to refer to the loop id itself. 8131 NewLoopID->replaceOperandWith(0, NewLoopID); 8132 L->setLoopID(NewLoopID); 8133 } 8134 } 8135 8136 //===--------------------------------------------------------------------===// 8137 // EpilogueVectorizerMainLoop 8138 //===--------------------------------------------------------------------===// 8139 8140 /// This function is partially responsible for generating the control flow 8141 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8142 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8143 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8144 Loop *Lp = createVectorLoopSkeleton(""); 8145 8146 // Generate the code to check the minimum iteration count of the vector 8147 // epilogue (see below). 8148 EPI.EpilogueIterationCountCheck = 8149 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8150 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8151 8152 // Generate the code to check any assumptions that we've made for SCEV 8153 // expressions. 8154 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8155 8156 // Generate the code that checks at runtime if arrays overlap. We put the 8157 // checks into a separate block to make the more common case of few elements 8158 // faster. 8159 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8160 8161 // Generate the iteration count check for the main loop, *after* the check 8162 // for the epilogue loop, so that the path-length is shorter for the case 8163 // that goes directly through the vector epilogue. The longer-path length for 8164 // the main loop is compensated for, by the gain from vectorizing the larger 8165 // trip count. Note: the branch will get updated later on when we vectorize 8166 // the epilogue. 
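// A sketch of the resulting guard structure (cf.
// https://llvm.org/docs/Vectorizers.html#epilogue-vectorization):
//   iter.check:                  TC too small for the epilogue VF -> scalar loop
//   vector.main.loop.iter.check: TC too small for the main VF     -> vec.epilog
//   otherwise:                                                    -> main vector loop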
8167 EPI.MainLoopIterationCountCheck = 8168 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8169 8170 // Generate the induction variable. 8171 OldInduction = Legal->getPrimaryInduction(); 8172 Type *IdxTy = Legal->getWidestInductionType(); 8173 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8174 8175 IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt()); 8176 Value *Step = getRuntimeVF(B, IdxTy, VF * UF); 8177 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8178 EPI.VectorTripCount = CountRoundDown; 8179 Induction = 8180 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8181 getDebugLocFromInstOrOperands(OldInduction)); 8182 8183 // Skip induction resume value creation here because they will be created in 8184 // the second pass. If we created them here, they wouldn't be used anyway, 8185 // because the vplan in the second pass still contains the inductions from the 8186 // original loop. 8187 8188 return completeLoopSkeleton(Lp, OrigLoopID); 8189 } 8190 8191 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8192 LLVM_DEBUG({ 8193 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8194 << "Main Loop VF:" << EPI.MainLoopVF 8195 << ", Main Loop UF:" << EPI.MainLoopUF 8196 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8197 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8198 }); 8199 } 8200 8201 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8202 DEBUG_WITH_TYPE(VerboseDebug, { 8203 dbgs() << "intermediate fn:\n" 8204 << *OrigLoop->getHeader()->getParent() << "\n"; 8205 }); 8206 } 8207 8208 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8209 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8210 assert(L && "Expected valid Loop."); 8211 assert(Bypass && "Expected valid bypass basic block."); 8212 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8213 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8214 Value *Count = getOrCreateTripCount(L); 8215 // Reuse existing vector loop preheader for TC checks. 8216 // Note that new preheader block is generated for vector loop. 8217 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8218 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8219 8220 // Generate code to check if the loop's trip count is less than VF * UF of the 8221 // main vector loop. 8222 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8223 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8224 8225 Value *CheckMinIters = Builder.CreateICmp( 8226 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 8227 "min.iters.check"); 8228 8229 if (!ForEpilogue) 8230 TCCheckBlock->setName("vector.main.loop.iter.check"); 8231 8232 // Create new preheader for vector loop. 8233 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8234 DT, LI, nullptr, "vector.ph"); 8235 8236 if (ForEpilogue) { 8237 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8238 DT->getNode(Bypass)->getIDom()) && 8239 "TC check is expected to dominate Bypass"); 8240 8241 // Update dominator for Bypass & LoopExit. 8242 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8243 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8244 // For loops with multiple exits, there's no edge from the middle block 8245 // to exit blocks (as the epilogue must run) and thus no need to update 8246 // the immediate dominator of the exit blocks. 
8247 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8248 8249 LoopBypassBlocks.push_back(TCCheckBlock); 8250 8251 // Save the trip count so we don't have to regenerate it in the 8252 // vec.epilog.iter.check. This is safe to do because the trip count 8253 // generated here dominates the vector epilog iter check. 8254 EPI.TripCount = Count; 8255 } 8256 8257 ReplaceInstWithInst( 8258 TCCheckBlock->getTerminator(), 8259 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8260 8261 return TCCheckBlock; 8262 } 8263 8264 //===--------------------------------------------------------------------===// 8265 // EpilogueVectorizerEpilogueLoop 8266 //===--------------------------------------------------------------------===// 8267 8268 /// This function is partially responsible for generating the control flow 8269 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8270 BasicBlock * 8271 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8272 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8273 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8274 8275 // Now, compare the remaining count and if there aren't enough iterations to 8276 // execute the vectorized epilogue skip to the scalar part. 8277 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8278 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8279 LoopVectorPreHeader = 8280 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8281 LI, nullptr, "vec.epilog.ph"); 8282 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8283 VecEpilogueIterationCountCheck); 8284 8285 // Adjust the control flow taking the state info from the main loop 8286 // vectorization into account. 8287 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8288 "expected this to be saved from the previous pass."); 8289 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8290 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8291 8292 DT->changeImmediateDominator(LoopVectorPreHeader, 8293 EPI.MainLoopIterationCountCheck); 8294 8295 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8296 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8297 8298 if (EPI.SCEVSafetyCheck) 8299 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8300 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8301 if (EPI.MemSafetyCheck) 8302 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8303 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8304 8305 DT->changeImmediateDominator( 8306 VecEpilogueIterationCountCheck, 8307 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8308 8309 DT->changeImmediateDominator(LoopScalarPreHeader, 8310 EPI.EpilogueIterationCountCheck); 8311 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8312 // If there is an epilogue which must run, there's no edge from the 8313 // middle block to exit blocks and thus no need to update the immediate 8314 // dominator of the exit blocks. 8315 DT->changeImmediateDominator(LoopExitBlock, 8316 EPI.EpilogueIterationCountCheck); 8317 8318 // Keep track of bypass blocks, as they feed start values to the induction 8319 // phis in the scalar loop preheader. 
8320 if (EPI.SCEVSafetyCheck)
8321 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8322 if (EPI.MemSafetyCheck)
8323 LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8324 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8325 
8326 // Generate a resume induction for the vector epilogue and put it in the
8327 // vector epilogue preheader
8328 Type *IdxTy = Legal->getWidestInductionType();
8329 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8330 LoopVectorPreHeader->getFirstNonPHI());
8331 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8332 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8333 EPI.MainLoopIterationCountCheck);
8334 
8335 // Generate the induction variable.
8336 OldInduction = Legal->getPrimaryInduction();
8337 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8338 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8339 Value *StartIdx = EPResumeVal;
8340 Induction =
8341 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8342 getDebugLocFromInstOrOperands(OldInduction));
8343 
8344 // Generate induction resume values. These variables save the new starting
8345 // indexes for the scalar loop. They are used to test if there are any tail
8346 // iterations left once the vector loop has completed.
8347 // Note that when the vectorized epilogue is skipped due to the iteration
8348 // count check, then the resume value for the induction variable comes from
8349 // the trip count of the main vector loop, hence passing the AdditionalBypass
8350 // argument.
8351 createInductionResumeValues(Lp, CountRoundDown,
8352 {VecEpilogueIterationCountCheck,
8353 EPI.VectorTripCount} /* AdditionalBypass */);
8354 
8355 AddRuntimeUnrollDisableMetaData(Lp);
8356 return completeLoopSkeleton(Lp, OrigLoopID);
8357 }
8358 
8359 BasicBlock *
8360 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8361 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8362 
8363 assert(EPI.TripCount &&
8364 "Expected trip count to have been saved in the first pass.");
8365 assert(
8366 (!isa<Instruction>(EPI.TripCount) ||
8367 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8368 "saved trip count does not dominate insertion point.");
8369 Value *TC = EPI.TripCount;
8370 IRBuilder<> Builder(Insert->getTerminator());
8371 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8372 
8373 // Generate code to check if the loop's trip count is less than VF * UF of the
8374 // vector epilogue loop.
8375 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8376 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8377 8378 Value *CheckMinIters = 8379 Builder.CreateICmp(P, Count, 8380 createStepForVF(Builder, Count->getType(), 8381 EPI.EpilogueVF, EPI.EpilogueUF), 8382 "min.epilog.iters.check"); 8383 8384 ReplaceInstWithInst( 8385 Insert->getTerminator(), 8386 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8387 8388 LoopBypassBlocks.push_back(Insert); 8389 return Insert; 8390 } 8391 8392 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8393 LLVM_DEBUG({ 8394 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8395 << "Epilogue Loop VF:" << EPI.EpilogueVF 8396 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8397 }); 8398 } 8399 8400 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8401 DEBUG_WITH_TYPE(VerboseDebug, { 8402 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 8403 }); 8404 } 8405 8406 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8407 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8408 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8409 bool PredicateAtRangeStart = Predicate(Range.Start); 8410 8411 for (ElementCount TmpVF = Range.Start * 2; 8412 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8413 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8414 Range.End = TmpVF; 8415 break; 8416 } 8417 8418 return PredicateAtRangeStart; 8419 } 8420 8421 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8422 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8423 /// of VF's starting at a given VF and extending it as much as possible. Each 8424 /// vectorization decision can potentially shorten this sub-range during 8425 /// buildVPlan(). 8426 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8427 ElementCount MaxVF) { 8428 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8429 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8430 VFRange SubRange = {VF, MaxVFPlusOne}; 8431 VPlans.push_back(buildVPlan(SubRange)); 8432 VF = SubRange.End; 8433 } 8434 } 8435 8436 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8437 VPlanPtr &Plan) { 8438 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8439 8440 // Look for cached value. 8441 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8442 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8443 if (ECEntryIt != EdgeMaskCache.end()) 8444 return ECEntryIt->second; 8445 8446 VPValue *SrcMask = createBlockInMask(Src, Plan); 8447 8448 // The terminator has to be a branch inst! 8449 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8450 assert(BI && "Unexpected terminator found"); 8451 8452 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8453 return EdgeMaskCache[Edge] = SrcMask; 8454 8455 // If source is an exiting block, we know the exit edge is dynamically dead 8456 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8457 // adding uses of an otherwise potentially dead instruction. 8458 if (OrigLoop->isLoopExiting(Src)) 8459 return EdgeMaskCache[Edge] = SrcMask; 8460 8461 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8462 assert(EdgeMask && "No Edge Mask found for condition"); 8463 8464 if (BI->getSuccessor(0) != Dst) 8465 EdgeMask = Builder.createNot(EdgeMask); 8466 8467 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 
8468 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8469 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8470 // The select version does not introduce new UB if SrcMask is false and 8471 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8472 VPValue *False = Plan->getOrAddVPValue( 8473 ConstantInt::getFalse(BI->getCondition()->getType())); 8474 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8475 } 8476 8477 return EdgeMaskCache[Edge] = EdgeMask; 8478 } 8479 8480 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8481 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8482 8483 // Look for cached value. 8484 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8485 if (BCEntryIt != BlockMaskCache.end()) 8486 return BCEntryIt->second; 8487 8488 // All-one mask is modelled as no-mask following the convention for masked 8489 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8490 VPValue *BlockMask = nullptr; 8491 8492 if (OrigLoop->getHeader() == BB) { 8493 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8494 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8495 8496 // Create the block in mask as the first non-phi instruction in the block. 8497 VPBuilder::InsertPointGuard Guard(Builder); 8498 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8499 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8500 8501 // Introduce the early-exit compare IV <= BTC to form header block mask. 8502 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8503 // Start by constructing the desired canonical IV. 8504 VPValue *IV = nullptr; 8505 if (Legal->getPrimaryInduction()) 8506 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8507 else { 8508 auto *IVRecipe = new VPWidenCanonicalIVRecipe(); 8509 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8510 IV = IVRecipe; 8511 } 8512 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8513 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8514 8515 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8516 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8517 // as a second argument, we only pass the IV here and extract the 8518 // tripcount from the transform state where codegen of the VP instructions 8519 // happen. 8520 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8521 } else { 8522 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8523 } 8524 return BlockMaskCache[BB] = BlockMask; 8525 } 8526 8527 // This is the block mask. We OR all incoming edges. 8528 for (auto *Predecessor : predecessors(BB)) { 8529 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8530 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8531 return BlockMaskCache[BB] = EdgeMask; 8532 8533 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
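      // This is the first edge seen for BB: adopt its mask directly; the
      // masks of any remaining incoming edges are OR'd into it below.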
8534 BlockMask = EdgeMask; 8535 continue; 8536 } 8537 8538 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8539 } 8540 8541 return BlockMaskCache[BB] = BlockMask; 8542 } 8543 8544 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8545 ArrayRef<VPValue *> Operands, 8546 VFRange &Range, 8547 VPlanPtr &Plan) { 8548 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8549 "Must be called with either a load or store"); 8550 8551 auto willWiden = [&](ElementCount VF) -> bool { 8552 if (VF.isScalar()) 8553 return false; 8554 LoopVectorizationCostModel::InstWidening Decision = 8555 CM.getWideningDecision(I, VF); 8556 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8557 "CM decision should be taken at this point."); 8558 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8559 return true; 8560 if (CM.isScalarAfterVectorization(I, VF) || 8561 CM.isProfitableToScalarize(I, VF)) 8562 return false; 8563 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8564 }; 8565 8566 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8567 return nullptr; 8568 8569 VPValue *Mask = nullptr; 8570 if (Legal->isMaskRequired(I)) 8571 Mask = createBlockInMask(I->getParent(), Plan); 8572 8573 // Determine if the pointer operand of the access is either consecutive or 8574 // reverse consecutive. 8575 LoopVectorizationCostModel::InstWidening Decision = 8576 CM.getWideningDecision(I, Range.Start); 8577 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8578 bool Consecutive = 8579 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8580 8581 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8582 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8583 Consecutive, Reverse); 8584 8585 StoreInst *Store = cast<StoreInst>(I); 8586 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8587 Mask, Consecutive, Reverse); 8588 } 8589 8590 VPWidenIntOrFpInductionRecipe * 8591 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8592 ArrayRef<VPValue *> Operands) const { 8593 // Check if this is an integer or fp induction. If so, build the recipe that 8594 // produces its scalar and vector values. 8595 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8596 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8597 II.getKind() == InductionDescriptor::IK_FpInduction) { 8598 assert(II.getStartValue() == 8599 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8600 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8601 return new VPWidenIntOrFpInductionRecipe( 8602 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8603 } 8604 8605 return nullptr; 8606 } 8607 8608 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8609 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8610 VPlan &Plan) const { 8611 // Optimize the special case where the source is a constant integer 8612 // induction variable. Notice that we can only optimize the 'trunc' case 8613 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8614 // (c) other casts depend on pointer size. 8615 8616 // Determine whether \p K is a truncation based on an induction variable that 8617 // can be optimized. 
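  // An illustrative sketch (not taken verbatim from any test): for a primary
  // induction
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %t  = trunc i64 %iv to i32
  // the truncate can itself be generated as a narrower (i32) widened
  // induction, so no separate trunc of the wide IV vector is needed.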
8618 auto isOptimizableIVTruncate = 8619 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8620 return [=](ElementCount VF) -> bool { 8621 return CM.isOptimizableIVTruncate(K, VF); 8622 }; 8623 }; 8624 8625 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8626 isOptimizableIVTruncate(I), Range)) { 8627 8628 InductionDescriptor II = 8629 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8630 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8631 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8632 Start, I); 8633 } 8634 return nullptr; 8635 } 8636 8637 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8638 ArrayRef<VPValue *> Operands, 8639 VPlanPtr &Plan) { 8640 // If all incoming values are equal, the incoming VPValue can be used directly 8641 // instead of creating a new VPBlendRecipe. 8642 VPValue *FirstIncoming = Operands[0]; 8643 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8644 return FirstIncoming == Inc; 8645 })) { 8646 return Operands[0]; 8647 } 8648 8649 // We know that all PHIs in non-header blocks are converted into selects, so 8650 // we don't have to worry about the insertion order and we can just use the 8651 // builder. At this point we generate the predication tree. There may be 8652 // duplications since this is a simple recursive scan, but future 8653 // optimizations will clean it up. 8654 SmallVector<VPValue *, 2> OperandsWithMask; 8655 unsigned NumIncoming = Phi->getNumIncomingValues(); 8656 8657 for (unsigned In = 0; In < NumIncoming; In++) { 8658 VPValue *EdgeMask = 8659 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8660 assert((EdgeMask || NumIncoming == 1) && 8661 "Multiple predecessors with one having a full mask"); 8662 OperandsWithMask.push_back(Operands[In]); 8663 if (EdgeMask) 8664 OperandsWithMask.push_back(EdgeMask); 8665 } 8666 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8667 } 8668 8669 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8670 ArrayRef<VPValue *> Operands, 8671 VFRange &Range) const { 8672 8673 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8674 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 8675 Range); 8676 8677 if (IsPredicated) 8678 return nullptr; 8679 8680 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8681 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8682 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8683 ID == Intrinsic::pseudoprobe || 8684 ID == Intrinsic::experimental_noalias_scope_decl)) 8685 return nullptr; 8686 8687 auto willWiden = [&](ElementCount VF) -> bool { 8688 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8689 // The following case may be scalarized depending on the VF. 8690 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8691 // version of the instruction. 8692 // Is it beneficial to perform intrinsic call compared to lib call? 8693 bool NeedToScalarize = false; 8694 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8695 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8696 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8697 return UseVectorIntrinsic || !NeedToScalarize; 8698 }; 8699 8700 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8701 return nullptr; 8702 8703 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8704 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8705 } 8706 8707 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8708 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8709 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8710 // Instruction should be widened, unless it is scalar after vectorization, 8711 // scalarization is profitable or it is predicated. 8712 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8713 return CM.isScalarAfterVectorization(I, VF) || 8714 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8715 }; 8716 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8717 Range); 8718 } 8719 8720 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8721 ArrayRef<VPValue *> Operands) const { 8722 auto IsVectorizableOpcode = [](unsigned Opcode) { 8723 switch (Opcode) { 8724 case Instruction::Add: 8725 case Instruction::And: 8726 case Instruction::AShr: 8727 case Instruction::BitCast: 8728 case Instruction::FAdd: 8729 case Instruction::FCmp: 8730 case Instruction::FDiv: 8731 case Instruction::FMul: 8732 case Instruction::FNeg: 8733 case Instruction::FPExt: 8734 case Instruction::FPToSI: 8735 case Instruction::FPToUI: 8736 case Instruction::FPTrunc: 8737 case Instruction::FRem: 8738 case Instruction::FSub: 8739 case Instruction::ICmp: 8740 case Instruction::IntToPtr: 8741 case Instruction::LShr: 8742 case Instruction::Mul: 8743 case Instruction::Or: 8744 case Instruction::PtrToInt: 8745 case Instruction::SDiv: 8746 case Instruction::Select: 8747 case Instruction::SExt: 8748 case Instruction::Shl: 8749 case Instruction::SIToFP: 8750 case Instruction::SRem: 8751 case Instruction::Sub: 8752 case Instruction::Trunc: 8753 case Instruction::UDiv: 8754 case Instruction::UIToFP: 8755 case Instruction::URem: 8756 case Instruction::Xor: 8757 case Instruction::ZExt: 8758 return true; 8759 } 8760 return false; 8761 }; 8762 8763 if (!IsVectorizableOpcode(I->getOpcode())) 8764 return nullptr; 8765 8766 // Success: widen this instruction. 
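  // VPWidenRecipe::execute (further down in this file) will emit one vector
  // instruction per unroll part for this opcode, e.g. roughly
  //   %wide = add <VF x i32> %a.vec, %b.vec
  // reusing the original opcode and IR flags (a sketch; the element type
  // follows the original instruction).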
8767 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8768 } 8769 8770 void VPRecipeBuilder::fixHeaderPhis() { 8771 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8772 for (VPWidenPHIRecipe *R : PhisToFix) { 8773 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8774 VPRecipeBase *IncR = 8775 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8776 R->addOperand(IncR->getVPSingleValue()); 8777 } 8778 } 8779 8780 VPBasicBlock *VPRecipeBuilder::handleReplication( 8781 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8782 VPlanPtr &Plan) { 8783 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8784 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8785 Range); 8786 8787 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8788 [&](ElementCount VF) { return CM.isPredicatedInst(I, IsUniform); }, 8789 Range); 8790 8791 // Even if the instruction is not marked as uniform, there are certain 8792 // intrinsic calls that can be effectively treated as such, so we check for 8793 // them here. Conservatively, we only do this for scalable vectors, since 8794 // for fixed-width VFs we can always fall back on full scalarization. 8795 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8796 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8797 case Intrinsic::assume: 8798 case Intrinsic::lifetime_start: 8799 case Intrinsic::lifetime_end: 8800 // For scalable vectors if one of the operands is variant then we still 8801 // want to mark as uniform, which will generate one instruction for just 8802 // the first lane of the vector. We can't scalarize the call in the same 8803 // way as for fixed-width vectors because we don't know how many lanes 8804 // there are. 8805 // 8806 // The reasons for doing it this way for scalable vectors are: 8807 // 1. For the assume intrinsic generating the instruction for the first 8808 // lane is still be better than not generating any at all. For 8809 // example, the input may be a splat across all lanes. 8810 // 2. For the lifetime start/end intrinsics the pointer operand only 8811 // does anything useful when the input comes from a stack object, 8812 // which suggests it should always be uniform. For non-stack objects 8813 // the effect is to poison the object, which still allows us to 8814 // remove the call. 8815 IsUniform = true; 8816 break; 8817 default: 8818 break; 8819 } 8820 } 8821 8822 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8823 IsUniform, IsPredicated); 8824 setRecipe(I, Recipe); 8825 Plan->addVPValue(I, Recipe); 8826 8827 // Find if I uses a predicated instruction. If so, it will use its scalar 8828 // value. Avoid hoisting the insert-element which packs the scalar value into 8829 // a vector value, as that happens iff all users use the vector value. 8830 for (VPValue *Op : Recipe->operands()) { 8831 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8832 if (!PredR) 8833 continue; 8834 auto *RepR = 8835 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8836 assert(RepR->isPredicated() && 8837 "expected Replicate recipe to be predicated"); 8838 RepR->setAlsoPack(false); 8839 } 8840 8841 // Finalize the recipe for Instr, first if it is not predicated. 
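  // For the predicated case handled after this check, the replicated
  // instruction is wrapped in a triangle-shaped region built by
  // createReplicateRegion below:
  //   pred.<opcode>.entry -> pred.<opcode>.if -> pred.<opcode>.continue
  // where the entry block can also branch straight to the continue block,
  // whose PHI recipe merges the conditionally produced scalar value back in.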
8842 if (!IsPredicated) { 8843 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8844 VPBB->appendRecipe(Recipe); 8845 return VPBB; 8846 } 8847 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8848 assert(VPBB->getSuccessors().empty() && 8849 "VPBB has successors when handling predicated replication."); 8850 // Record predicated instructions for above packing optimizations. 8851 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8852 VPBlockUtils::insertBlockAfter(Region, VPBB); 8853 auto *RegSucc = new VPBasicBlock(); 8854 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8855 return RegSucc; 8856 } 8857 8858 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8859 VPRecipeBase *PredRecipe, 8860 VPlanPtr &Plan) { 8861 // Instructions marked for predication are replicated and placed under an 8862 // if-then construct to prevent side-effects. 8863 8864 // Generate recipes to compute the block mask for this region. 8865 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8866 8867 // Build the triangular if-then region. 8868 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8869 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8870 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8871 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8872 auto *PHIRecipe = Instr->getType()->isVoidTy() 8873 ? nullptr 8874 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8875 if (PHIRecipe) { 8876 Plan->removeVPValueFor(Instr); 8877 Plan->addVPValue(Instr, PHIRecipe); 8878 } 8879 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8880 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8881 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8882 8883 // Note: first set Entry as region entry and then connect successors starting 8884 // from it in order, to propagate the "parent" of each VPBasicBlock. 8885 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8886 VPBlockUtils::connectBlocks(Pred, Exit); 8887 8888 return Region; 8889 } 8890 8891 VPRecipeOrVPValueTy 8892 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8893 ArrayRef<VPValue *> Operands, 8894 VFRange &Range, VPlanPtr &Plan) { 8895 // First, check for specific widening recipes that deal with calls, memory 8896 // operations, inductions and Phi nodes. 
8897 if (auto *CI = dyn_cast<CallInst>(Instr)) 8898 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8899 8900 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8901 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8902 8903 VPRecipeBase *Recipe; 8904 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8905 if (Phi->getParent() != OrigLoop->getHeader()) 8906 return tryToBlend(Phi, Operands, Plan); 8907 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 8908 return toVPRecipeResult(Recipe); 8909 8910 VPWidenPHIRecipe *PhiRecipe = nullptr; 8911 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8912 VPValue *StartV = Operands[0]; 8913 if (Legal->isReductionVariable(Phi)) { 8914 const RecurrenceDescriptor &RdxDesc = 8915 Legal->getReductionVars().find(Phi)->second; 8916 assert(RdxDesc.getRecurrenceStartValue() == 8917 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8918 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8919 CM.isInLoopReduction(Phi), 8920 CM.useOrderedReductions(RdxDesc)); 8921 } else { 8922 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8923 } 8924 8925 // Record the incoming value from the backedge, so we can add the incoming 8926 // value from the backedge after all recipes have been created. 8927 recordRecipeOf(cast<Instruction>( 8928 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8929 PhisToFix.push_back(PhiRecipe); 8930 } else { 8931 // TODO: record start and backedge value for remaining pointer induction 8932 // phis. 8933 assert(Phi->getType()->isPointerTy() && 8934 "only pointer phis should be handled here"); 8935 PhiRecipe = new VPWidenPHIRecipe(Phi); 8936 } 8937 8938 return toVPRecipeResult(PhiRecipe); 8939 } 8940 8941 if (isa<TruncInst>(Instr) && 8942 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8943 Range, *Plan))) 8944 return toVPRecipeResult(Recipe); 8945 8946 if (!shouldWiden(Instr, Range)) 8947 return nullptr; 8948 8949 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8950 return toVPRecipeResult(new VPWidenGEPRecipe( 8951 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8952 8953 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8954 bool InvariantCond = 8955 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8956 return toVPRecipeResult(new VPWidenSelectRecipe( 8957 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8958 } 8959 8960 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8961 } 8962 8963 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8964 ElementCount MaxVF) { 8965 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8966 8967 // Collect instructions from the original loop that will become trivially dead 8968 // in the vectorized loop. We don't need to vectorize these instructions. For 8969 // example, original induction update instructions can become dead because we 8970 // separately emit induction "steps" when generating code for the new loop. 8971 // Similarly, we create a new latch condition when setting up the structure 8972 // of the new loop, so the old one can become dead. 8973 SmallPtrSet<Instruction *, 4> DeadInstructions; 8974 collectTriviallyDeadInstructions(DeadInstructions); 8975 8976 // Add assume instructions we need to drop to DeadInstructions, to prevent 8977 // them from being added to the VPlan. 8978 // TODO: We only need to drop assumes in blocks that get flattend. 
If the 8979 // control flow is preserved, we should keep them.
8980 auto &ConditionalAssumes = Legal->getConditionalAssumes();
8981 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8982 
8983 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8984 // Dead instructions do not need sinking. Remove them from SinkAfter.
8985 for (Instruction *I : DeadInstructions)
8986 SinkAfter.erase(I);
8987 
8988 // Cannot sink instructions after dead instructions (there won't be any
8989 // recipes for them). Instead, find the first non-dead previous instruction.
8990 for (auto &P : Legal->getSinkAfter()) {
8991 Instruction *SinkTarget = P.second;
8992 Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8993 (void)FirstInst;
8994 while (DeadInstructions.contains(SinkTarget)) {
8995 assert(
8996 SinkTarget != FirstInst &&
8997 "Must find a live instruction (at least the one feeding the "
8998 "first-order recurrence PHI) before reaching the beginning of the block");
8999 SinkTarget = SinkTarget->getPrevNode();
9000 assert(SinkTarget != P.first &&
9001 "sink source equals target, no sinking required");
9002 }
9003 P.second = SinkTarget;
9004 }
9005 
9006 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9007 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9008 VFRange SubRange = {VF, MaxVFPlusOne};
9009 VPlans.push_back(
9010 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9011 VF = SubRange.End;
9012 }
9013 }
9014 
9015 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9016 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9017 const MapVector<Instruction *, Instruction *> &SinkAfter) {
9018 
9019 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9020 
9021 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9022 
9023 // ---------------------------------------------------------------------------
9024 // Pre-construction: record ingredients whose recipes we'll need to further
9025 // process after constructing the initial VPlan.
9026 // ---------------------------------------------------------------------------
9027 
9028 // Mark instructions we'll need to sink later and their targets as
9029 // ingredients whose recipe we'll need to record.
9030 for (auto &Entry : SinkAfter) {
9031 RecipeBuilder.recordRecipeOf(Entry.first);
9032 RecipeBuilder.recordRecipeOf(Entry.second);
9033 }
9034 for (auto &Reduction : CM.getInLoopReductionChains()) {
9035 PHINode *Phi = Reduction.first;
9036 RecurKind Kind =
9037 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
9038 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9039 
9040 RecipeBuilder.recordRecipeOf(Phi);
9041 for (auto &R : ReductionOperations) {
9042 RecipeBuilder.recordRecipeOf(R);
9043 // For min/max reductions, where we have a pair of icmp/select, we also
9044 // need to record the ICmp recipe, so it can be removed later.
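      // For example (sketch), a signed-max reduction appears as the pair
      //   %cmp = icmp sgt i32 %max, %x
      //   %max.next = select i1 %cmp, i32 %max, i32 %x
      // and only the select carries the reduction; the compare's recipe
      // becomes dead once the select is rewritten into a reduction recipe and
      // is erased in adjustRecipesForReductions.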
9045 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9046 "Only min/max recurrences allowed for inloop reductions");
9047 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9048 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9049 }
9050 }
9051 
9052 // For each interleave group which is relevant for this (possibly trimmed)
9053 // Range, add it to the set of groups to be later applied to the VPlan and add
9054 // placeholders for its members' Recipes which we'll be replacing with a
9055 // single VPInterleaveRecipe.
9056 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9057 auto applyIG = [IG, this](ElementCount VF) -> bool {
9058 return (VF.isVector() && // Query is illegal for VF == 1
9059 CM.getWideningDecision(IG->getInsertPos(), VF) ==
9060 LoopVectorizationCostModel::CM_Interleave);
9061 };
9062 if (!getDecisionAndClampRange(applyIG, Range))
9063 continue;
9064 InterleaveGroups.insert(IG);
9065 for (unsigned i = 0; i < IG->getFactor(); i++)
9066 if (Instruction *Member = IG->getMember(i))
9067 RecipeBuilder.recordRecipeOf(Member);
9068 };
9069 
9070 // ---------------------------------------------------------------------------
9071 // Build initial VPlan: Scan the body of the loop in a topological order to
9072 // visit each basic block after having visited its predecessor basic blocks.
9073 // ---------------------------------------------------------------------------
9074 
9075 auto Plan = std::make_unique<VPlan>();
9076 
9077 // Scan the body of the loop in a topological order to visit each basic block
9078 // after having visited its predecessor basic blocks.
9079 LoopBlocksDFS DFS(OrigLoop);
9080 DFS.perform(LI);
9081 
9082 VPBasicBlock *VPBB = nullptr;
9083 VPBasicBlock *HeaderVPBB = nullptr;
9084 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9085 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9086 // Relevant instructions from basic block BB will be grouped into VPRecipe
9087 // ingredients and fill a new VPBasicBlock.
9088 unsigned VPBBsForBB = 0;
9089 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9090 if (VPBB)
9091 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9092 else {
9093 auto *TopRegion = new VPRegionBlock("vector loop");
9094 TopRegion->setEntry(FirstVPBBForBB);
9095 Plan->setEntry(TopRegion);
9096 HeaderVPBB = FirstVPBBForBB;
9097 }
9098 VPBB = FirstVPBBForBB;
9099 Builder.setInsertPoint(VPBB);
9100 
9101 // Introduce each ingredient into VPlan.
9102 // TODO: Model and preserve debug intrinsics in VPlan.
9103 for (Instruction &I : BB->instructionsWithoutDebug()) {
9104 Instruction *Instr = &I;
9105 
9106 // First filter out irrelevant instructions, to ensure no recipes are
9107 // built for them.
9108 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9109 continue;
9110 
9111 SmallVector<VPValue *, 4> Operands;
9112 auto *Phi = dyn_cast<PHINode>(Instr);
9113 if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9114 Operands.push_back(Plan->getOrAddVPValue(
9115 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9116 } else {
9117 auto OpRange = Plan->mapToVPValues(Instr->operands());
9118 Operands = {OpRange.begin(), OpRange.end()};
9119 }
9120 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9121 Instr, Operands, Range, Plan)) {
9122 // If Instr can be simplified to an existing VPValue, use it.
9123 if (RecipeOrValue.is<VPValue *>()) { 9124 auto *VPV = RecipeOrValue.get<VPValue *>(); 9125 Plan->addVPValue(Instr, VPV); 9126 // If the re-used value is a recipe, register the recipe for the 9127 // instruction, in case the recipe for Instr needs to be recorded. 9128 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9129 RecipeBuilder.setRecipe(Instr, R); 9130 continue; 9131 } 9132 // Otherwise, add the new recipe. 9133 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9134 for (auto *Def : Recipe->definedValues()) { 9135 auto *UV = Def->getUnderlyingValue(); 9136 Plan->addVPValue(UV, Def); 9137 } 9138 9139 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9140 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9141 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9142 // of the header block. That can happen for truncates of induction 9143 // variables. Those recipes are moved to the phi section of the header 9144 // block after applying SinkAfter, which relies on the original 9145 // position of the trunc. 9146 assert(isa<TruncInst>(Instr)); 9147 InductionsToMove.push_back( 9148 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9149 } 9150 RecipeBuilder.setRecipe(Instr, Recipe); 9151 VPBB->appendRecipe(Recipe); 9152 continue; 9153 } 9154 9155 // Otherwise, if all widening options failed, Instruction is to be 9156 // replicated. This may create a successor for VPBB. 9157 VPBasicBlock *NextVPBB = 9158 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9159 if (NextVPBB != VPBB) { 9160 VPBB = NextVPBB; 9161 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9162 : ""); 9163 } 9164 } 9165 } 9166 9167 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9168 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9169 "entry block must be set to a VPRegionBlock having a non-empty entry " 9170 "VPBasicBlock"); 9171 RecipeBuilder.fixHeaderPhis(); 9172 9173 // --------------------------------------------------------------------------- 9174 // Transform initial VPlan: Apply previously taken decisions, in order, to 9175 // bring the VPlan to its final state. 9176 // --------------------------------------------------------------------------- 9177 9178 // Apply Sink-After legal constraints. 9179 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9180 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9181 if (Region && Region->isReplicator()) { 9182 assert(Region->getNumSuccessors() == 1 && 9183 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9184 assert(R->getParent()->size() == 1 && 9185 "A recipe in an original replicator region must be the only " 9186 "recipe in its block"); 9187 return Region; 9188 } 9189 return nullptr; 9190 }; 9191 for (auto &Entry : SinkAfter) { 9192 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9193 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9194 9195 auto *TargetRegion = GetReplicateRegion(Target); 9196 auto *SinkRegion = GetReplicateRegion(Sink); 9197 if (!SinkRegion) { 9198 // If the sink source is not a replicate region, sink the recipe directly. 9199 if (TargetRegion) { 9200 // The target is in a replication region, make sure to move Sink to 9201 // the block after it, not into the replication region itself. 
9202 VPBasicBlock *NextBlock = 9203 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9204 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9205 } else 9206 Sink->moveAfter(Target); 9207 continue; 9208 } 9209 9210 // The sink source is in a replicate region. Unhook the region from the CFG. 9211 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9212 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9213 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9214 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9215 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9216 9217 if (TargetRegion) { 9218 // The target recipe is also in a replicate region, move the sink region 9219 // after the target region. 9220 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9221 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9222 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9223 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9224 } else { 9225 // The sink source is in a replicate region, we need to move the whole 9226 // replicate region, which should only contain a single recipe in the 9227 // main block. 9228 auto *SplitBlock = 9229 Target->getParent()->splitAt(std::next(Target->getIterator())); 9230 9231 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9232 9233 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9234 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9235 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9236 if (VPBB == SplitPred) 9237 VPBB = SplitBlock; 9238 } 9239 } 9240 9241 cast<VPRegionBlock>(Plan->getEntry())->setExit(VPBB); 9242 9243 // Now that sink-after is done, move induction recipes for optimized truncates 9244 // to the phi section of the header block. 9245 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9246 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9247 9248 // Adjust the recipes for any inloop reductions. 9249 adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start); 9250 9251 // Introduce a recipe to combine the incoming and previous values of a 9252 // first-order recurrence. 9253 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9254 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9255 if (!RecurPhi) 9256 continue; 9257 9258 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9259 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9260 auto *Region = GetReplicateRegion(PrevRecipe); 9261 if (Region) 9262 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9263 if (Region || PrevRecipe->isPhi()) 9264 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9265 else 9266 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9267 9268 auto *RecurSplice = cast<VPInstruction>( 9269 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9270 {RecurPhi, RecurPhi->getBackedgeValue()})); 9271 9272 RecurPhi->replaceAllUsesWith(RecurSplice); 9273 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9274 // all users. 9275 RecurSplice->setOperand(0, RecurPhi); 9276 } 9277 9278 // Interleave memory: for each Interleave Group we marked earlier as relevant 9279 // for this VPlan, replace the Recipes widening its memory instructions with a 9280 // single VPInterleaveRecipe at its insertion point. 
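  // An illustrative sketch: a factor-2 group of adjacent loads such as
  //   %a = load i32, i32* %p
  //   %b = load i32, i32* %p.plus.1
  // is emitted as a single wide load covering both members followed by
  // shufflevectors that de-interleave the lanes; the members' original
  // widened-memory recipes are erased below.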
9281 for (auto IG : InterleaveGroups) { 9282 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9283 RecipeBuilder.getRecipe(IG->getInsertPos())); 9284 SmallVector<VPValue *, 4> StoredValues; 9285 for (unsigned i = 0; i < IG->getFactor(); ++i) 9286 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9287 auto *StoreR = 9288 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9289 StoredValues.push_back(StoreR->getStoredValue()); 9290 } 9291 9292 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9293 Recipe->getMask()); 9294 VPIG->insertBefore(Recipe); 9295 unsigned J = 0; 9296 for (unsigned i = 0; i < IG->getFactor(); ++i) 9297 if (Instruction *Member = IG->getMember(i)) { 9298 if (!Member->getType()->isVoidTy()) { 9299 VPValue *OriginalV = Plan->getVPValue(Member); 9300 Plan->removeVPValueFor(Member); 9301 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9302 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9303 J++; 9304 } 9305 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9306 } 9307 } 9308 9309 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9310 // in ways that accessing values using original IR values is incorrect. 9311 Plan->disableValue2VPValue(); 9312 9313 VPlanTransforms::sinkScalarOperands(*Plan); 9314 VPlanTransforms::mergeReplicateRegions(*Plan); 9315 9316 std::string PlanName; 9317 raw_string_ostream RSO(PlanName); 9318 ElementCount VF = Range.Start; 9319 Plan->addVF(VF); 9320 RSO << "Initial VPlan for VF={" << VF; 9321 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9322 Plan->addVF(VF); 9323 RSO << "," << VF; 9324 } 9325 RSO << "},UF>=1"; 9326 RSO.flush(); 9327 Plan->setName(PlanName); 9328 9329 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9330 return Plan; 9331 } 9332 9333 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9334 // Outer loop handling: They may require CFG and instruction level 9335 // transformations before even evaluating whether vectorization is profitable. 9336 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9337 // the vectorization pipeline. 9338 assert(!OrigLoop->isInnermost()); 9339 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9340 9341 // Create new empty VPlan 9342 auto Plan = std::make_unique<VPlan>(); 9343 9344 // Build hierarchical CFG 9345 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9346 HCFGBuilder.buildHierarchicalCFG(); 9347 9348 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9349 VF *= 2) 9350 Plan->addVF(VF); 9351 9352 if (EnableVPlanPredication) { 9353 VPlanPredicator VPP(*Plan); 9354 VPP.predicate(); 9355 9356 // Avoid running transformation to recipes until masked code generation in 9357 // VPlan-native path is in place. 9358 return Plan; 9359 } 9360 9361 SmallPtrSet<Instruction *, 1> DeadInstructions; 9362 VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan, 9363 Legal->getInductionVars(), 9364 DeadInstructions, *PSE.getSE()); 9365 return Plan; 9366 } 9367 9368 // Adjust the recipes for reductions. For in-loop reductions the chain of 9369 // instructions leading from the loop exit instr to the phi need to be converted 9370 // to reductions, with one operand being vector and the other being the scalar 9371 // reduction chain. For other reductions, a select is introduced between the phi 9372 // and live-out recipes when folding the tail. 
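// For example (sketch), an in-loop add reduction whose loop body contains
//   %sum.next = add i32 %sum, %val
// keeps %sum scalar: the widened add is replaced by a VPReductionRecipe that
// reduces the vector of %val lanes and adds the result into the scalar chain.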
9373 void LoopVectorizationPlanner::adjustRecipesForReductions(
9374 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9375 ElementCount MinVF) {
9376 for (auto &Reduction : CM.getInLoopReductionChains()) {
9377 PHINode *Phi = Reduction.first;
9378 const RecurrenceDescriptor &RdxDesc =
9379 Legal->getReductionVars().find(Phi)->second;
9380 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9381 
9382 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9383 continue;
9384 
9385 // ReductionOperations are ordered top-down from the phi's use to the
9386 // LoopExitValue. We keep track of the previous item (the Chain) to tell
9387 // which of the two operands will remain scalar and which will be reduced.
9388 // For minmax the chain will be the select instructions.
9389 Instruction *Chain = Phi;
9390 for (Instruction *R : ReductionOperations) {
9391 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9392 RecurKind Kind = RdxDesc.getRecurrenceKind();
9393 
9394 VPValue *ChainOp = Plan->getVPValue(Chain);
9395 unsigned FirstOpId;
9396 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9397 "Only min/max recurrences allowed for inloop reductions");
9398 // Recognize a call to the llvm.fmuladd intrinsic.
9399 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9400 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9401 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9402 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9403 assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9404 "Expected to replace a VPWidenSelectSC");
9405 FirstOpId = 1;
9406 } else {
9407 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9408 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9409 "Expected to replace a VPWidenSC");
9410 FirstOpId = 0;
9411 }
9412 unsigned VecOpId =
9413 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9414 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9415 
9416 auto *CondOp = CM.foldTailByMasking()
9417 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9418 : nullptr;
9419 
9420 if (IsFMulAdd) {
9421 // If the instruction is a call to the llvm.fmuladd intrinsic then we
9422 // need to create an fmul recipe to use as the vector operand for the
9423 // fadd reduction.
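        // e.g. (sketch) %s = call float @llvm.fmuladd.f32(float %a, float %b, float %acc)
        // is handled as %t = fmul float %a, %b feeding an fadd-style
        // reduction into the scalar chain %acc.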
9424 VPInstruction *FMulRecipe = new VPInstruction( 9425 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9426 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9427 WidenRecipe->getParent()->insert(FMulRecipe, 9428 WidenRecipe->getIterator()); 9429 VecOp = FMulRecipe; 9430 } 9431 VPReductionRecipe *RedRecipe = 9432 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9433 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9434 Plan->removeVPValueFor(R); 9435 Plan->addVPValue(R, RedRecipe); 9436 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9437 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9438 WidenRecipe->eraseFromParent(); 9439 9440 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9441 VPRecipeBase *CompareRecipe = 9442 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9443 assert(isa<VPWidenRecipe>(CompareRecipe) && 9444 "Expected to replace a VPWidenSC"); 9445 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9446 "Expected no remaining users"); 9447 CompareRecipe->eraseFromParent(); 9448 } 9449 Chain = R; 9450 } 9451 } 9452 9453 // If tail is folded by masking, introduce selects between the phi 9454 // and the live-out instruction of each reduction, at the end of the latch. 9455 if (CM.foldTailByMasking()) { 9456 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9457 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9458 if (!PhiR || PhiR->isInLoop()) 9459 continue; 9460 Builder.setInsertPoint(LatchVPBB); 9461 VPValue *Cond = 9462 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9463 VPValue *Red = PhiR->getBackedgeValue(); 9464 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9465 } 9466 } 9467 } 9468 9469 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9470 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9471 VPSlotTracker &SlotTracker) const { 9472 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9473 IG->getInsertPos()->printAsOperand(O, false); 9474 O << ", "; 9475 getAddr()->printAsOperand(O, SlotTracker); 9476 VPValue *Mask = getMask(); 9477 if (Mask) { 9478 O << ", "; 9479 Mask->printAsOperand(O, SlotTracker); 9480 } 9481 9482 unsigned OpIdx = 0; 9483 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9484 if (!IG->getMember(i)) 9485 continue; 9486 if (getNumStoreOperands() > 0) { 9487 O << "\n" << Indent << " store "; 9488 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9489 O << " to index " << i; 9490 } else { 9491 O << "\n" << Indent << " "; 9492 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9493 O << " = load from index " << i; 9494 } 9495 ++OpIdx; 9496 } 9497 } 9498 #endif 9499 9500 void VPWidenCallRecipe::execute(VPTransformState &State) { 9501 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9502 *this, State); 9503 } 9504 9505 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9506 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9507 State.ILV->setDebugLocFromInst(&I); 9508 9509 // The condition can be loop invariant but still defined inside the 9510 // loop. This means that we can't just use the original 'cond' value. 9511 // We have to take the 'vectorized' value and pick the first lane. 9512 // Instcombine will make this a no-op. 9513 auto *InvarCond = 9514 InvariantCond ? 
State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9515 9516 for (unsigned Part = 0; Part < State.UF; ++Part) { 9517 Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part); 9518 Value *Op0 = State.get(getOperand(1), Part); 9519 Value *Op1 = State.get(getOperand(2), Part); 9520 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9521 State.set(this, Sel, Part); 9522 State.ILV->addMetadata(Sel, &I); 9523 } 9524 } 9525 9526 void VPWidenRecipe::execute(VPTransformState &State) { 9527 auto &I = *cast<Instruction>(getUnderlyingValue()); 9528 auto &Builder = State.Builder; 9529 switch (I.getOpcode()) { 9530 case Instruction::Call: 9531 case Instruction::Br: 9532 case Instruction::PHI: 9533 case Instruction::GetElementPtr: 9534 case Instruction::Select: 9535 llvm_unreachable("This instruction is handled by a different recipe."); 9536 case Instruction::UDiv: 9537 case Instruction::SDiv: 9538 case Instruction::SRem: 9539 case Instruction::URem: 9540 case Instruction::Add: 9541 case Instruction::FAdd: 9542 case Instruction::Sub: 9543 case Instruction::FSub: 9544 case Instruction::FNeg: 9545 case Instruction::Mul: 9546 case Instruction::FMul: 9547 case Instruction::FDiv: 9548 case Instruction::FRem: 9549 case Instruction::Shl: 9550 case Instruction::LShr: 9551 case Instruction::AShr: 9552 case Instruction::And: 9553 case Instruction::Or: 9554 case Instruction::Xor: { 9555 // Just widen unops and binops. 9556 State.ILV->setDebugLocFromInst(&I); 9557 9558 for (unsigned Part = 0; Part < State.UF; ++Part) { 9559 SmallVector<Value *, 2> Ops; 9560 for (VPValue *VPOp : operands()) 9561 Ops.push_back(State.get(VPOp, Part)); 9562 9563 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9564 9565 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9566 VecOp->copyIRFlags(&I); 9567 9568 // If the instruction is vectorized and was in a basic block that needed 9569 // predication, we can't propagate poison-generating flags (nuw/nsw, 9570 // exact, etc.). The control flow has been linearized and the 9571 // instruction is no longer guarded by the predicate, which could make 9572 // the flag properties to no longer hold. 9573 if (State.MayGeneratePoisonRecipes.count(this) > 0) 9574 VecOp->dropPoisonGeneratingFlags(); 9575 } 9576 9577 // Use this vector value for all users of the original instruction. 9578 State.set(this, V, Part); 9579 State.ILV->addMetadata(V, &I); 9580 } 9581 9582 break; 9583 } 9584 case Instruction::ICmp: 9585 case Instruction::FCmp: { 9586 // Widen compares. Generate vector compares. 9587 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9588 auto *Cmp = cast<CmpInst>(&I); 9589 State.ILV->setDebugLocFromInst(Cmp); 9590 for (unsigned Part = 0; Part < State.UF; ++Part) { 9591 Value *A = State.get(getOperand(0), Part); 9592 Value *B = State.get(getOperand(1), Part); 9593 Value *C = nullptr; 9594 if (FCmp) { 9595 // Propagate fast math flags. 
9596 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9597 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9598 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9599 } else { 9600 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9601 } 9602 State.set(this, C, Part); 9603 State.ILV->addMetadata(C, &I); 9604 } 9605 9606 break; 9607 } 9608 9609 case Instruction::ZExt: 9610 case Instruction::SExt: 9611 case Instruction::FPToUI: 9612 case Instruction::FPToSI: 9613 case Instruction::FPExt: 9614 case Instruction::PtrToInt: 9615 case Instruction::IntToPtr: 9616 case Instruction::SIToFP: 9617 case Instruction::UIToFP: 9618 case Instruction::Trunc: 9619 case Instruction::FPTrunc: 9620 case Instruction::BitCast: { 9621 auto *CI = cast<CastInst>(&I); 9622 State.ILV->setDebugLocFromInst(CI); 9623 9624 /// Vectorize casts. 9625 Type *DestTy = (State.VF.isScalar()) 9626 ? CI->getType() 9627 : VectorType::get(CI->getType(), State.VF); 9628 9629 for (unsigned Part = 0; Part < State.UF; ++Part) { 9630 Value *A = State.get(getOperand(0), Part); 9631 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9632 State.set(this, Cast, Part); 9633 State.ILV->addMetadata(Cast, &I); 9634 } 9635 break; 9636 } 9637 default: 9638 // This instruction is not vectorized by simple widening. 9639 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9640 llvm_unreachable("Unhandled instruction!"); 9641 } // end of switch. 9642 } 9643 9644 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9645 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9646 // Construct a vector GEP by widening the operands of the scalar GEP as 9647 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9648 // results in a vector of pointers when at least one operand of the GEP 9649 // is vector-typed. Thus, to keep the representation compact, we only use 9650 // vector-typed operands for loop-varying values. 9651 9652 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9653 // If we are vectorizing, but the GEP has only loop-invariant operands, 9654 // the GEP we build (by only using vector-typed operands for 9655 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9656 // produce a vector of pointers, we need to either arbitrarily pick an 9657 // operand to broadcast, or broadcast a clone of the original GEP. 9658 // Here, we broadcast a clone of the original. 9659 // 9660 // TODO: If at some point we decide to scalarize instructions having 9661 // loop-invariant operands, this special case will no longer be 9662 // required. We would add the scalarization decision to 9663 // collectLoopScalars() and teach getVectorValue() to broadcast 9664 // the lane-zero scalar value. 9665 auto *Clone = State.Builder.Insert(GEP->clone()); 9666 for (unsigned Part = 0; Part < State.UF; ++Part) { 9667 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9668 State.set(this, EntryPart, Part); 9669 State.ILV->addMetadata(EntryPart, GEP); 9670 } 9671 } else { 9672 // If the GEP has at least one loop-varying operand, we are sure to 9673 // produce a vector of pointers. But if we are only unrolling, we want 9674 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9675 // produce with the code below will be scalar (if VF == 1) or vector 9676 // (otherwise). Note that for the unroll-only case, we still maintain 9677 // values in the vector mapping with initVector, as we do for other 9678 // instructions. 
9679 for (unsigned Part = 0; Part < State.UF; ++Part) { 9680 // The pointer operand of the new GEP. If it's loop-invariant, we 9681 // won't broadcast it. 9682 auto *Ptr = IsPtrLoopInvariant 9683 ? State.get(getOperand(0), VPIteration(0, 0)) 9684 : State.get(getOperand(0), Part); 9685 9686 // Collect all the indices for the new GEP. If any index is 9687 // loop-invariant, we won't broadcast it. 9688 SmallVector<Value *, 4> Indices; 9689 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9690 VPValue *Operand = getOperand(I); 9691 if (IsIndexLoopInvariant[I - 1]) 9692 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9693 else 9694 Indices.push_back(State.get(Operand, Part)); 9695 } 9696 9697 // If the GEP instruction is vectorized and was in a basic block that 9698 // needed predication, we can't propagate the poison-generating 'inbounds' 9699 // flag. The control flow has been linearized and the GEP is no longer 9700 // guarded by the predicate, which could make the 'inbounds' properties to 9701 // no longer hold. 9702 bool IsInBounds = 9703 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9704 9705 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9706 // but it should be a vector, otherwise. 9707 auto *NewGEP = IsInBounds 9708 ? State.Builder.CreateInBoundsGEP( 9709 GEP->getSourceElementType(), Ptr, Indices) 9710 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9711 Ptr, Indices); 9712 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9713 "NewGEP is not a pointer vector"); 9714 State.set(this, NewGEP, Part); 9715 State.ILV->addMetadata(NewGEP, GEP); 9716 } 9717 } 9718 } 9719 9720 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9721 assert(!State.Instance && "Int or FP induction being replicated."); 9722 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9723 getTruncInst(), getVPValue(0), 9724 getCastValue(), State); 9725 } 9726 9727 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9728 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9729 State); 9730 } 9731 9732 void VPBlendRecipe::execute(VPTransformState &State) { 9733 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9734 // We know that all PHIs in non-header blocks are converted into 9735 // selects, so we don't have to worry about the insertion order and we 9736 // can just use the builder. 9737 // At this point we generate the predication tree. There may be 9738 // duplications since this is a simple recursive scan, but future 9739 // optimizations will clean it up. 9740 9741 unsigned NumIncoming = getNumIncomingValues(); 9742 9743 // Generate a sequence of selects of the form: 9744 // SELECT(Mask3, In3, 9745 // SELECT(Mask2, In2, 9746 // SELECT(Mask1, In1, 9747 // In0))) 9748 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9749 // are essentially undef are taken from In0. 9750 InnerLoopVectorizer::VectorParts Entry(State.UF); 9751 for (unsigned In = 0; In < NumIncoming; ++In) { 9752 for (unsigned Part = 0; Part < State.UF; ++Part) { 9753 // We might have single edge PHIs (blocks) - use an identity 9754 // 'select' for the first PHI operand. 9755 Value *In0 = State.get(getIncomingValue(In), Part); 9756 if (In == 0) 9757 Entry[Part] = In0; // Initialize with the first incoming value. 9758 else { 9759 // Select between the current value and the previous incoming edge 9760 // based on the incoming mask. 
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
                                      getStoredValues(), getMask());
}

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0);
  RecurKind Kind = RdxDesc->getRecurrenceKind();
  bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
  // Propagate the fast-math flags carried by the underlying instruction.
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part);
      VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
      Value *Iden = RdxDesc->getRecurrenceIdentity(
          Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
      Value *IdenVec =
          State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      if (State.VF.isVector())
        NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
                                        PrevInChain);
      else
        NewRed = State.Builder.CreateBinOp(
            (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
            NewVecOp);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part);
      NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
      NextInChain = createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
                                   NewRed, PrevInChain);
    } else if (IsOrdered)
      NextInChain = NewRed;
    else
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
          PrevInChain);
    State.set(this, NextInChain, Part);
  }
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
    State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
                                    IsPredicated, State);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF.isVector()) {
      // If we're constructing lane 0, initialize to start from poison.
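      // Each subsequent lane then inserts its scalar into the vector
      // accumulated so far via packScalarIntoVectorValue, building the packed
      // value one insertelement at a time.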
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
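  //
  // An illustrative sketch of the two forms (value names are made up): for
  // vector users the phi merges whole vectors,
  //   %vphi = phi <4 x i32> [ %vec.unmodified, %predicating.bb ],
  //                         [ %vec.with.new.lane, %predicated.bb ]
  // while for scalar users a scalar phi suffices:
  //   %phi = phi i32 [ poison, %predicating.bb ], [ %scalar.val, %predicated.bb ]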
  unsigned Part = State.Instance->Part;
  if (State.hasVectorValue(getOperand(0), Part)) {
    Value *VectorValue = State.get(getOperand(0), Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    if (State.hasVectorValue(this, Part))
      State.reset(this, VPhi, Part);
    else
      State.set(this, VPhi, Part);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), VPhi, Part);
  } else {
    Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
                     PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Instance))
      State.reset(this, Phi, *State.Instance);
    else
      State.set(this, Phi, *State.Instance);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), Phi, *State.Instance);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;

  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
  StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);

  assert((LI || SI) && "Invalid Load/Store instruction");
  assert((!SI || StoredValue) && "No stored value provided for widened store");
  assert((!LI || !StoredValue) && "Stored value provided for widened load");

  Type *ScalarDataTy = getLoadStoreType(&Ingredient);

  auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
  const Align Alignment = getLoadStoreAlignment(&Ingredient);
  bool CreateGatherScatter = !Consecutive;

  auto &Builder = State.Builder;
  InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
  bool isMaskRequired = getMask();
  if (isMaskRequired)
    for (unsigned Part = 0; Part < State.UF; ++Part)
      BlockInMaskParts[Part] = State.get(getMask(), Part);

  const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
    // Calculate the pointer for the specific unroll-part.
    GetElementPtrInst *PartPtr = nullptr;

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
      InBounds = gep->isInBounds();
    if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors, VScale is 1, so RunTimeVF simplifies to
      // VF.getKnownMinValue().
      Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
      // NumElt = -Part * RunTimeVF
      Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
      // LastLane = 1 - RunTimeVF
      Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
      PartPtr =
          cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
      PartPtr->setIsInBounds(InBounds);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
      PartPtr->setIsInBounds(InBounds);
      if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
        BlockInMaskParts[Part] =
            Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
    } else {
      Value *Increment =
          createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
      PartPtr->setIsInBounds(InBounds);
    }

    unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
    return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  };

  // Handle Stores:
  if (SI) {
    State.ILV->setDebugLocFromInst(SI);

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Instruction *NewSI = nullptr;
      Value *StoredVal = State.get(StoredValue, Part);
      if (CreateGatherScatter) {
        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
        Value *VectorGep = State.get(getAddr(), Part);
        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
                                            MaskPart);
      } else {
        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
          // We don't want to update the value in the map as it might be used
          // in another expression. So don't call resetVectorValue(StoredVal).
        }
        auto *VecPtr =
            CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
        if (isMaskRequired)
          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
                                            BlockInMaskParts[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
      }
      State.ILV->addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  State.ILV->setDebugLocFromInst(LI);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(getAddr(), Part);
      NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      State.ILV->addMetadata(NewLI, LI);
    } else {
      auto *VecPtr =
          CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
            PoisonValue::get(DataTy), "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load, but set the vector value to the reverse
      // shuffle, if one is created below.
      State.ILV->addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
    }

    State.set(getVPSingleValue(), NewLI, Part);
  }
}

// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyzes whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives.
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
  }

  // 3) If set, obey the hints.
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  }

  // 4) If the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, LVL.getLAI()))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
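  // (With VF == 1 the "vector" value of a part is simply its lane-0 scalar.)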
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
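//
// A minimal sketch of the kind of loop nest this path targets (illustrative
// only; it assumes the path is enabled via -enable-vplan-native-path and the
// outer loop carries an explicit vectorization hint):
//   #pragma clang loop vectorize(enable)
//   for (int i = 0; i < n; i++)   // outer loop, vectorized as a whole
//     for (int j = 0; j < m; j++)
//       a[i][j] += b[i];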
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get the user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop ends up mixing floating point precisions,
// there will be a performance penalty from the conversion overhead and the
// reduced effective vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
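  //
  // An illustrative case (hypothetical source): storing a float computed in
  // double precision, e.g.
  //   a[i] = b[i] * 2.0; // 'float' b[i] is fpext'd to double, then truncated
  // makes the vector body mix <N x float> and <N x double> operations.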
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // The function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
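  // Requirements records constraints seen during the legality checks (e.g. an
  // exact FP-math instruction) that are revisited before the final decision;
  // see the canVectorizeFPMath check further down.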
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot
  // modify the incoming IR, we need to build VPlan upfront in the
  // vectorization pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behavior.
  if (ForceOrderedReductions.getNumOccurrences() > 0)
    AllowOrderedReductions = ForceOrderedReductions;
  else
    AllowOrderedReductions = TTI->enableOrderedReductions();
  if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get the user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to
    // not be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided not to vectorize the loop, then interleave it.
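      // Interleaving the scalar loop can still improve instruction-level
      // parallelism even when vector code is not worthwhile (see the register
      // check in runImpl below).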
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);

      VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
      LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
        LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
                        DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // The second pass vectorizes the epilogue and adjusts the control
        // flow edges from the first pass.
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);

        VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
        LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
                        DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
        LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that
        // is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
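    // (setAlreadyVectorized records this by setting the llvm.loop.isvectorized
    // loop metadata.)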
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops, processing each loop nest in the
  // function.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve the LoopInfo/DominatorTree analyses with
  // outer loop vectorization. Until this is addressed, mark these analyses
  // as preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}
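
// For example (assuming both options are left at their defaults, so neither
// is forced-only), the pipeline text printed above is roughly:
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>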