//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
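//
// A simplified illustration of the transformation (hypothetical source, not
// taken from a test case), assuming a vectorization factor of 4 and a loop
// body with no cross-iteration dependences:
//
//     for (i = 0; i < n; ++i)                for (i = 0; i < n; i += 4)
//       A[i] = B[i] + C[i];          -->       A[i:i+3] = B[i:i+3] + C[i:i+3];
//
// The remaining n % 4 iterations are executed by a scalar epilogue loop (or
// folded into the vector body via predication, see the tail-folding options
// below).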
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; it lists the available
// options. I.e., the vectorizer will try to fold the tail loop (epilogue)
// into the vector body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
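// For example (illustrative; exact sizes depend on the target's DataLayout):
// hasIrregularType returns true for i1, whose type size is 1 bit but whose
// alloc size is 8 bits, and for x86_fp80, whose 80-bit type size is smaller
// than its alloc size; it returns false for types like i32 or float, whose
// alloc and type sizes match, so arrays of them have no padding.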
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }
  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p Ptr using the debug location in
  /// \p V. If \p Ptr is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Introduce a conditional branch (on true, condition to be set later) at
  /// the end of the header=latch connecting it to itself (across the backedge)
  /// and to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};
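// An illustrative (not authoritative) example of the resulting structure: with
// MainLoopVF = 8 and EpilogueVF = 4, a loop with 21 iterations executes 16
// iterations in the main vector loop, 4 in the vector epilogue, and the final
// iteration in the scalar remainder loop; the blocks recorded in
// EpilogueLoopVectorizationInfo route execution between those loops.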
/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(
      BasicBlock *Bypass,
      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
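// For example (illustrative only): with Ty = i64, Step = 2 and a fixed VF = 4,
// createStepForVF returns the constant i64 8; with a scalable VF =
// <vscale x 4>, it instead emits a runtime value computing vscale * 8.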
/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}
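// A sketch of the situation collectPoisonGeneratingRecipes (below) guards
// against, using a hypothetical loop: a getelementptr marked 'inbounds' feeds
// the address of a load that is executed only under a condition in the
// original loop. If that load is widened and executed unconditionally (e.g. as
// a consecutive vector load), the 'inbounds' flag could now produce poison on
// iterations where the original load never ran, so the flag must be dropped.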
void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }
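  // An illustrative case for the queries below: the pointer operand of a
  // consecutive load is typically uniform after vectorization, because all VF
  // lanes of a given unrolled part read from addresses derived from one scalar
  // base pointer, so only a single scalar copy of the address computation is
  // needed per part.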
1353 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1354 if (VF.isScalar())
1355 return true;
1356
1357 // Cost model is not run in the VPlan-native path - return conservative
1358 // result until this changes.
1359 if (EnableVPlanNativePath)
1360 return false;
1361
1362 auto UniformsPerVF = Uniforms.find(VF);
1363 assert(UniformsPerVF != Uniforms.end() &&
1364 "VF not yet analyzed for uniformity");
1365 return UniformsPerVF->second.count(I);
1366 }
1367
1368 /// Returns true if \p I is known to be scalar after vectorization.
1369 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1370 if (VF.isScalar())
1371 return true;
1372
1373 // Cost model is not run in the VPlan-native path - return conservative
1374 // result until this changes.
1375 if (EnableVPlanNativePath)
1376 return false;
1377
1378 auto ScalarsPerVF = Scalars.find(VF);
1379 assert(ScalarsPerVF != Scalars.end() &&
1380 "Scalar values are not calculated for VF");
1381 return ScalarsPerVF->second.count(I);
1382 }
1383
1384 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1385 /// for vectorization factor \p VF.
1386 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1387 return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1388 !isProfitableToScalarize(I, VF) &&
1389 !isScalarAfterVectorization(I, VF);
1390 }
1391
1392 /// Decision that was taken during cost calculation for a memory instruction.
1393 enum InstWidening {
1394 CM_Unknown,
1395 CM_Widen, // For consecutive accesses with stride +1.
1396 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1397 CM_Interleave,
1398 CM_GatherScatter,
1399 CM_Scalarize
1400 };
1401
1402 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1403 /// instruction \p I and vector width \p VF.
1404 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1405 InstructionCost Cost) {
1406 assert(VF.isVector() && "Expected VF >=2");
1407 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1408 }
1409
1410 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1411 /// interleaving group \p Grp and vector width \p VF.
1412 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1413 ElementCount VF, InstWidening W,
1414 InstructionCost Cost) {
1415 assert(VF.isVector() && "Expected VF >=2");
1416 /// Broadcast this decision to all instructions inside the group.
1417 /// But the cost will be assigned to one instruction only.
1418 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1419 if (auto *I = Grp->getMember(i)) {
1420 if (Grp->getInsertPos() == I)
1421 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1422 else
1423 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1424 }
1425 }
1426 }
1427
1428 /// Return the cost model decision for the given instruction \p I and vector
1429 /// width \p VF. Return CM_Unknown if this instruction did not pass
1430 /// through the cost modeling.
1431 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1432 assert(VF.isVector() && "Expected VF to be a vector VF");
1433 // Cost model is not run in the VPlan-native path - return conservative
1434 // result until this changes.
1435 if (EnableVPlanNativePath) 1436 return CM_GatherScatter; 1437 1438 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1439 auto Itr = WideningDecisions.find(InstOnVF); 1440 if (Itr == WideningDecisions.end()) 1441 return CM_Unknown; 1442 return Itr->second.first; 1443 } 1444 1445 /// Return the vectorization cost for the given instruction \p I and vector 1446 /// width \p VF. 1447 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1448 assert(VF.isVector() && "Expected VF >=2"); 1449 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1450 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1451 "The cost is not calculated"); 1452 return WideningDecisions[InstOnVF].second; 1453 } 1454 1455 /// Return True if instruction \p I is an optimizable truncate whose operand 1456 /// is an induction variable. Such a truncate will be removed by adding a new 1457 /// induction variable with the destination type. 1458 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1459 // If the instruction is not a truncate, return false. 1460 auto *Trunc = dyn_cast<TruncInst>(I); 1461 if (!Trunc) 1462 return false; 1463 1464 // Get the source and destination types of the truncate. 1465 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1466 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1467 1468 // If the truncate is free for the given types, return false. Replacing a 1469 // free truncate with an induction variable would add an induction variable 1470 // update instruction to each iteration of the loop. We exclude from this 1471 // check the primary induction variable since it will need an update 1472 // instruction regardless. 1473 Value *Op = Trunc->getOperand(0); 1474 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1475 return false; 1476 1477 // If the truncated value is not an induction variable, return false. 1478 return Legal->isInductionPhi(Op); 1479 } 1480 1481 /// Collects the instructions to scalarize for each predicated instruction in 1482 /// the loop. 1483 void collectInstsToScalarize(ElementCount VF); 1484 1485 /// Collect Uniform and Scalar values for the given \p VF. 1486 /// The sets depend on CM decision for Load/Store instructions 1487 /// that may be vectorized as interleave, gather-scatter or scalarized. 1488 void collectUniformsAndScalars(ElementCount VF) { 1489 // Do the analysis once. 1490 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1491 return; 1492 setCostBasedWideningDecision(VF); 1493 collectLoopUniforms(VF); 1494 collectLoopScalars(VF); 1495 } 1496 1497 /// Returns true if the target machine supports masked store operation 1498 /// for the given \p DataType and kind of access to \p Ptr. 1499 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1500 return Legal->isConsecutivePtr(DataType, Ptr) && 1501 TTI.isLegalMaskedStore(DataType, Alignment); 1502 } 1503 1504 /// Returns true if the target machine supports masked load operation 1505 /// for the given \p DataType and kind of access to \p Ptr. 1506 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1507 return Legal->isConsecutivePtr(DataType, Ptr) && 1508 TTI.isLegalMaskedLoad(DataType, Alignment); 1509 } 1510 1511 /// Returns true if the target machine can represent \p V as a masked gather 1512 /// or scatter operation. 
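/// For example (an illustrative sketch): an indirect access such as
///   for (i = 0; i < n; ++i)
///     Sum += A[B[i]];
/// has no consecutive pointer, so the load of A[B[i]] can only be widened as a
/// gather. The helper below widens the loaded/stored element type to
/// <VF x EltTy> and asks TTI whether a masked gather (for loads) or a masked
/// scatter (for stores) with the access's alignment is legal on the target.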
1513 bool isLegalGatherOrScatter(Value *V, 1514 ElementCount VF = ElementCount::getFixed(1)) { 1515 bool LI = isa<LoadInst>(V); 1516 bool SI = isa<StoreInst>(V); 1517 if (!LI && !SI) 1518 return false; 1519 auto *Ty = getLoadStoreType(V); 1520 Align Align = getLoadStoreAlignment(V); 1521 if (VF.isVector()) 1522 Ty = VectorType::get(Ty, VF); 1523 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1524 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1525 } 1526 1527 /// Returns true if the target machine supports all of the reduction 1528 /// variables found for the given VF. 1529 bool canVectorizeReductions(ElementCount VF) const { 1530 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1531 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1532 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1533 })); 1534 } 1535 1536 /// Returns true if \p I is an instruction that will be scalarized with 1537 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1538 /// instructions include conditional stores and instructions that may divide 1539 /// by zero. 1540 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1541 1542 // Returns true if \p I is an instruction that will be predicated either 1543 // through scalar predication or masked load/store or masked gather/scatter. 1544 // \p VF is the vectorization factor that will be used to vectorize \p I. 1545 // Superset of instructions that return true for isScalarWithPredication. 1546 bool isPredicatedInst(Instruction *I, ElementCount VF, 1547 bool IsKnownUniform = false) { 1548 // When we know the load is uniform and the original scalar loop was not 1549 // predicated we don't need to mark it as a predicated instruction. Any 1550 // vectorised blocks created when tail-folding are something artificial we 1551 // have introduced and we know there is always at least one active lane. 1552 // That's why we call Legal->blockNeedsPredication here because it doesn't 1553 // query tail-folding. 1554 if (IsKnownUniform && isa<LoadInst>(I) && 1555 !Legal->blockNeedsPredication(I->getParent())) 1556 return false; 1557 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1558 return false; 1559 // Loads and stores that need some form of masked operation are predicated 1560 // instructions. 1561 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1562 return Legal->isMaskRequired(I); 1563 return isScalarWithPredication(I, VF); 1564 } 1565 1566 /// Returns true if \p I is a memory instruction with consecutive memory 1567 /// access that can be widened. 1568 bool 1569 memoryInstructionCanBeWidened(Instruction *I, 1570 ElementCount VF = ElementCount::getFixed(1)); 1571 1572 /// Returns true if \p I is a memory instruction in an interleaved-group 1573 /// of memory accesses that can be vectorized with wide vector loads/stores 1574 /// and shuffles. 1575 bool 1576 interleavedAccessCanBeWidened(Instruction *I, 1577 ElementCount VF = ElementCount::getFixed(1)); 1578 1579 /// Check if \p Instr belongs to any interleaved access group. 1580 bool isAccessInterleaved(Instruction *Instr) { 1581 return InterleaveInfo.isInterleaved(Instr); 1582 } 1583 1584 /// Get the interleaved access group that \p Instr belongs to. 1585 const InterleaveGroup<Instruction> * 1586 getInterleavedAccessGroup(Instruction *Instr) { 1587 return InterleaveInfo.getInterleaveGroup(Instr); 1588 } 1589 1590 /// Returns true if we're required to use a scalar epilogue for at least 1591 /// the final iteration of the original loop. 
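/// For example (an illustrative sketch): an interleave group with a gap, such
/// as
///   for (i = 0; i < n; ++i)
///     use(A[2*i]);        // A[2*i+1] is never read
/// may read past the last element when executed as a single wide access, so
/// the final iteration(s) must run in the scalar epilogue. Likewise, a loop
/// that can exit from a block other than the latch must execute the exiting
/// iteration in scalar form.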
1592 bool requiresScalarEpilogue(ElementCount VF) const { 1593 if (!isScalarEpilogueAllowed()) 1594 return false; 1595 // If we might exit from anywhere but the latch, must run the exiting 1596 // iteration in scalar form. 1597 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1598 return true; 1599 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1600 } 1601 1602 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1603 /// loop hint annotation. 1604 bool isScalarEpilogueAllowed() const { 1605 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1606 } 1607 1608 /// Returns true if all loop blocks should be masked to fold tail loop. 1609 bool foldTailByMasking() const { return FoldTailByMasking; } 1610 1611 /// Returns true if the instructions in this block requires predication 1612 /// for any reason, e.g. because tail folding now requires a predicate 1613 /// or because the block in the original loop was predicated. 1614 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const { 1615 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1616 } 1617 1618 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1619 /// nodes to the chain of instructions representing the reductions. Uses a 1620 /// MapVector to ensure deterministic iteration order. 1621 using ReductionChainMap = 1622 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1623 1624 /// Return the chain of instructions representing an inloop reduction. 1625 const ReductionChainMap &getInLoopReductionChains() const { 1626 return InLoopReductionChains; 1627 } 1628 1629 /// Returns true if the Phi is part of an inloop reduction. 1630 bool isInLoopReduction(PHINode *Phi) const { 1631 return InLoopReductionChains.count(Phi); 1632 } 1633 1634 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1635 /// with factor VF. Return the cost of the instruction, including 1636 /// scalarization overhead if it's needed. 1637 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1638 1639 /// Estimate cost of a call instruction CI if it were vectorized with factor 1640 /// VF. Return the cost of the instruction, including scalarization overhead 1641 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1642 /// scalarized - 1643 /// i.e. either vector version isn't available, or is too expensive. 1644 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1645 bool &NeedToScalarize) const; 1646 1647 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1648 /// that of B. 1649 bool isMoreProfitable(const VectorizationFactor &A, 1650 const VectorizationFactor &B) const; 1651 1652 /// Invalidates decisions already taken by the cost model. 1653 void invalidateCostModelingDecisions() { 1654 WideningDecisions.clear(); 1655 Uniforms.clear(); 1656 Scalars.clear(); 1657 } 1658 1659 private: 1660 unsigned NumPredStores = 0; 1661 1662 /// Convenience function that returns the value of vscale_range iff 1663 /// vscale_range.min == vscale_range.max or otherwise returns the value 1664 /// returned by the corresponding TLI method. 1665 Optional<unsigned> getVScaleForTuning() const; 1666 1667 /// \return An upper bound for the vectorization factors for both 1668 /// fixed and scalable vectorization, where the minimum-known number of 1669 /// elements is a power-of-2 larger than zero. 
If scalable vectorization is
1670 /// disabled or unsupported, then the scalable part will be equal to
1671 /// ElementCount::getScalable(0).
1672 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1673 ElementCount UserVF,
1674 bool FoldTailByMasking);
1675
1676 /// \return the maximized element count based on the target's vector
1677 /// registers and the loop trip count, but limited to a maximum safe VF.
1678 /// This is a helper function of computeFeasibleMaxVF.
1679 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1680 /// issue that occurred on one of the buildbots which cannot be reproduced
1681 /// without having access to the proprietary compiler (see comments on
1682 /// D98509). The issue is currently under investigation and this workaround
1683 /// will be removed as soon as possible.
1684 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1685 unsigned SmallestType,
1686 unsigned WidestType,
1687 const ElementCount &MaxSafeVF,
1688 bool FoldTailByMasking);
1689
1690 /// \return the maximum legal scalable VF, based on the safe max number
1691 /// of elements.
1692 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1693
1694 /// The vectorization cost is a combination of the cost itself and a boolean
1695 /// indicating whether any of the contributing operations will actually
1696 /// operate on vector values after type legalization in the backend. If this
1697 /// latter value is false, then all operations will be scalarized (i.e. no
1698 /// vectorization has actually taken place).
1699 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1700
1701 /// Returns the expected execution cost. The unit of the cost does
1702 /// not matter because we use the 'cost' units to compare different
1703 /// vector widths. The cost that is returned is *not* normalized by
1704 /// the factor width. If \p Invalid is not nullptr, this function
1705 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1706 /// each instruction that has an Invalid cost for the given VF.
1707 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1708 VectorizationCostTy
1709 expectedCost(ElementCount VF,
1710 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1711
1712 /// Returns the execution time cost of an instruction for a given vector
1713 /// width. Vector width of one means scalar.
1714 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1715
1716 /// The cost-computation logic from getInstructionCost which provides
1717 /// the vector type as an output parameter.
1718 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1719 Type *&VectorTy);
1720
1721 /// Return the cost of instructions in an inloop reduction pattern, if I is
1722 /// part of that pattern.
1723 Optional<InstructionCost>
1724 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1725 TTI::TargetCostKind CostKind);
1726
1727 /// Calculate vectorization cost of memory instruction \p I.
1728 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1729
1730 /// The cost computation for a scalarized memory instruction.
1731 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1732
1733 /// The cost computation for an interleaving group of memory instructions.
1734 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1735
1736 /// The cost computation for a Gather/Scatter instruction.
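/// For example (illustrative numbers only): for the access A[B[i]] at VF = 4,
/// the cost returned here is the target's price for a masked gather of
/// <4 x EltTy>, which the cost model later compares against the alternatives,
/// e.g. scalarizing into 4 scalar loads plus insert/extract overhead, when it
/// picks between CM_GatherScatter and CM_Scalarize.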
1737 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1738
1739 /// The cost computation for widening instruction \p I with consecutive
1740 /// memory access.
1741 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1742
1743 /// The cost calculation for Load/Store instruction \p I with a uniform pointer -
1744 /// Load: scalar load + broadcast.
1745 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1746 /// element)
1747 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1748
1749 /// Estimate the overhead of scalarizing an instruction. This is a
1750 /// convenience wrapper for the type-based getScalarizationOverhead API.
1751 InstructionCost getScalarizationOverhead(Instruction *I,
1752 ElementCount VF) const;
1753
1754 /// Returns whether the instruction is a load or store and will be emitted
1755 /// as a vector operation.
1756 bool isConsecutiveLoadOrStore(Instruction *I);
1757
1758 /// Returns true if an artificially high cost for emulated masked memrefs
1759 /// should be used.
1760 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1761
1762 /// Map of scalar integer values to the smallest bitwidth they can be legally
1763 /// represented as. The vector equivalents of these values should be truncated
1764 /// to this type.
1765 MapVector<Instruction *, uint64_t> MinBWs;
1766
1767 /// A type representing the costs for instructions if they were to be
1768 /// scalarized rather than vectorized. The entries are Instruction-Cost
1769 /// pairs.
1770 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1771
1772 /// A set containing all BasicBlocks that are known to be present after
1773 /// vectorization as predicated blocks.
1774 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1775
1776 /// Records whether it is allowed to have the original scalar loop execute at
1777 /// least once. This may be needed as a fallback loop in case runtime
1778 /// aliasing/dependence checks fail, or to handle the tail/remainder
1779 /// iterations when the trip count is unknown or doesn't divide by the VF,
1780 /// or as a peel-loop to handle gaps in interleave-groups.
1781 /// Under optsize and when the trip count is very small we don't allow any
1782 /// iterations to execute in the scalar loop.
1783 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1784
1785 /// All blocks of the loop are to be masked to fold the tail of scalar iterations.
1786 bool FoldTailByMasking = false;
1787
1788 /// A map holding scalar costs for different vectorization factors. The
1789 /// presence of a cost for an instruction in the mapping indicates that the
1790 /// instruction will be scalarized when vectorizing with the associated
1791 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1792 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1793
1794 /// Holds the instructions known to be uniform after vectorization.
1795 /// The data is collected per VF.
1796 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1797
1798 /// Holds the instructions known to be scalar after vectorization.
1799 /// The data is collected per VF.
1800 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1801
1802 /// Holds the instructions (address computations) that are forced to be
1803 /// scalarized.
1804 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1805 1806 /// PHINodes of the reductions that should be expanded in-loop along with 1807 /// their associated chains of reduction operations, in program order from top 1808 /// (PHI) to bottom 1809 ReductionChainMap InLoopReductionChains; 1810 1811 /// A Map of inloop reduction operations and their immediate chain operand. 1812 /// FIXME: This can be removed once reductions can be costed correctly in 1813 /// vplan. This was added to allow quick lookup to the inloop operations, 1814 /// without having to loop through InLoopReductionChains. 1815 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1816 1817 /// Returns the expected difference in cost from scalarizing the expression 1818 /// feeding a predicated instruction \p PredInst. The instructions to 1819 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1820 /// non-negative return value implies the expression will be scalarized. 1821 /// Currently, only single-use chains are considered for scalarization. 1822 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1823 ElementCount VF); 1824 1825 /// Collect the instructions that are uniform after vectorization. An 1826 /// instruction is uniform if we represent it with a single scalar value in 1827 /// the vectorized loop corresponding to each vector iteration. Examples of 1828 /// uniform instructions include pointer operands of consecutive or 1829 /// interleaved memory accesses. Note that although uniformity implies an 1830 /// instruction will be scalar, the reverse is not true. In general, a 1831 /// scalarized instruction will be represented by VF scalar values in the 1832 /// vectorized loop, each corresponding to an iteration of the original 1833 /// scalar loop. 1834 void collectLoopUniforms(ElementCount VF); 1835 1836 /// Collect the instructions that are scalar after vectorization. An 1837 /// instruction is scalar if it is known to be uniform or will be scalarized 1838 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1839 /// to the list if they are used by a load/store instruction that is marked as 1840 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1841 /// VF values in the vectorized loop, each corresponding to an iteration of 1842 /// the original scalar loop. 1843 void collectLoopScalars(ElementCount VF); 1844 1845 /// Keeps cost model vectorization decision and cost for instructions. 1846 /// Right now it is used for memory instructions only. 1847 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1848 std::pair<InstWidening, InstructionCost>>; 1849 1850 DecisionList WideningDecisions; 1851 1852 /// Returns true if \p V is expected to be vectorized and it needs to be 1853 /// extracted. 1854 bool needsExtract(Value *V, ElementCount VF) const { 1855 Instruction *I = dyn_cast<Instruction>(V); 1856 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1857 TheLoop->isLoopInvariant(I)) 1858 return false; 1859 1860 // Assume we can vectorize V (and hence we need extraction) if the 1861 // scalars are not computed yet. This can happen, because it is called 1862 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1863 // the scalars are collected. That should be a safe assumption in most 1864 // cases, because we check if the operands have vectorizable types 1865 // beforehand in LoopVectorizationLegality. 
1866 return Scalars.find(VF) == Scalars.end() || 1867 !isScalarAfterVectorization(I, VF); 1868 }; 1869 1870 /// Returns a range containing only operands needing to be extracted. 1871 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1872 ElementCount VF) const { 1873 return SmallVector<Value *, 4>(make_filter_range( 1874 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1875 } 1876 1877 /// Determines if we have the infrastructure to vectorize loop \p L and its 1878 /// epilogue, assuming the main loop is vectorized by \p VF. 1879 bool isCandidateForEpilogueVectorization(const Loop &L, 1880 const ElementCount VF) const; 1881 1882 /// Returns true if epilogue vectorization is considered profitable, and 1883 /// false otherwise. 1884 /// \p VF is the vectorization factor chosen for the original loop. 1885 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1886 1887 public: 1888 /// The loop that we evaluate. 1889 Loop *TheLoop; 1890 1891 /// Predicated scalar evolution analysis. 1892 PredicatedScalarEvolution &PSE; 1893 1894 /// Loop Info analysis. 1895 LoopInfo *LI; 1896 1897 /// Vectorization legality. 1898 LoopVectorizationLegality *Legal; 1899 1900 /// Vector target information. 1901 const TargetTransformInfo &TTI; 1902 1903 /// Target Library Info. 1904 const TargetLibraryInfo *TLI; 1905 1906 /// Demanded bits analysis. 1907 DemandedBits *DB; 1908 1909 /// Assumption cache. 1910 AssumptionCache *AC; 1911 1912 /// Interface to emit optimization remarks. 1913 OptimizationRemarkEmitter *ORE; 1914 1915 const Function *TheFunction; 1916 1917 /// Loop Vectorize Hint. 1918 const LoopVectorizeHints *Hints; 1919 1920 /// The interleave access information contains groups of interleaved accesses 1921 /// with the same stride and close to each other. 1922 InterleavedAccessInfo &InterleaveInfo; 1923 1924 /// Values to ignore in the cost model. 1925 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1926 1927 /// Values to ignore in the cost model when VF > 1. 1928 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1929 1930 /// All element types found in the loop. 1931 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1932 1933 /// Profitable vector factors. 1934 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1935 }; 1936 } // end namespace llvm 1937 1938 /// Helper struct to manage generating runtime checks for vectorization. 1939 /// 1940 /// The runtime checks are created up-front in temporary blocks to allow better 1941 /// estimating the cost and un-linked from the existing IR. After deciding to 1942 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1943 /// temporary blocks are completely removed. 1944 class GeneratedRTChecks { 1945 /// Basic block which contains the generated SCEV checks, if any. 1946 BasicBlock *SCEVCheckBlock = nullptr; 1947 1948 /// The value representing the result of the generated SCEV checks. If it is 1949 /// nullptr, either no SCEV checks have been generated or they have been used. 1950 Value *SCEVCheckCond = nullptr; 1951 1952 /// Basic block which contains the generated memory runtime checks, if any. 1953 BasicBlock *MemCheckBlock = nullptr; 1954 1955 /// The value representing the result of the generated memory runtime checks. 1956 /// If it is nullptr, either no memory runtime checks have been generated or 1957 /// they have been used. 
1958 Value *MemRuntimeCheckCond = nullptr; 1959 1960 DominatorTree *DT; 1961 LoopInfo *LI; 1962 1963 SCEVExpander SCEVExp; 1964 SCEVExpander MemCheckExp; 1965 1966 public: 1967 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1968 const DataLayout &DL) 1969 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1970 MemCheckExp(SE, DL, "scev.check") {} 1971 1972 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1973 /// accurately estimate the cost of the runtime checks. The blocks are 1974 /// un-linked from the IR and is added back during vector code generation. If 1975 /// there is no vector code generation, the check blocks are removed 1976 /// completely. 1977 void Create(Loop *L, const LoopAccessInfo &LAI, 1978 const SCEVPredicate &Pred) { 1979 1980 BasicBlock *LoopHeader = L->getHeader(); 1981 BasicBlock *Preheader = L->getLoopPreheader(); 1982 1983 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1984 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1985 // may be used by SCEVExpander. The blocks will be un-linked from their 1986 // predecessors and removed from LI & DT at the end of the function. 1987 if (!Pred.isAlwaysTrue()) { 1988 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1989 nullptr, "vector.scevcheck"); 1990 1991 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1992 &Pred, SCEVCheckBlock->getTerminator()); 1993 } 1994 1995 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1996 if (RtPtrChecking.Need) { 1997 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1998 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1999 "vector.memcheck"); 2000 2001 MemRuntimeCheckCond = 2002 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 2003 RtPtrChecking.getChecks(), MemCheckExp); 2004 assert(MemRuntimeCheckCond && 2005 "no RT checks generated although RtPtrChecking " 2006 "claimed checks are required"); 2007 } 2008 2009 if (!MemCheckBlock && !SCEVCheckBlock) 2010 return; 2011 2012 // Unhook the temporary block with the checks, update various places 2013 // accordingly. 2014 if (SCEVCheckBlock) 2015 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2016 if (MemCheckBlock) 2017 MemCheckBlock->replaceAllUsesWith(Preheader); 2018 2019 if (SCEVCheckBlock) { 2020 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2021 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2022 Preheader->getTerminator()->eraseFromParent(); 2023 } 2024 if (MemCheckBlock) { 2025 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2026 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2027 Preheader->getTerminator()->eraseFromParent(); 2028 } 2029 2030 DT->changeImmediateDominator(LoopHeader, Preheader); 2031 if (MemCheckBlock) { 2032 DT->eraseNode(MemCheckBlock); 2033 LI->removeBlock(MemCheckBlock); 2034 } 2035 if (SCEVCheckBlock) { 2036 DT->eraseNode(SCEVCheckBlock); 2037 LI->removeBlock(SCEVCheckBlock); 2038 } 2039 } 2040 2041 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2042 /// unused. 
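/// A rough usage sketch (argument names are illustrative; the real call sites
/// live in the vectorizer driver code in this file):
///   GeneratedRTChecks Checks(SE, DT, LI, DL);
///   Checks.Create(L, LAI, Pred);   // build SCEV/memory check blocks up front
///   ... include their cost when deciding whether to vectorize ...
///   // If vectorizing: emitSCEVChecks()/emitMemRuntimeChecks() wire the blocks
///   // back into the CFG and mark the check conditions as used.
///   // If not vectorizing: this destructor erases the unused blocks and
///   // instructions again.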
2043 ~GeneratedRTChecks() { 2044 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2045 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2046 if (!SCEVCheckCond) 2047 SCEVCleaner.markResultUsed(); 2048 2049 if (!MemRuntimeCheckCond) 2050 MemCheckCleaner.markResultUsed(); 2051 2052 if (MemRuntimeCheckCond) { 2053 auto &SE = *MemCheckExp.getSE(); 2054 // Memory runtime check generation creates compares that use expanded 2055 // values. Remove them before running the SCEVExpanderCleaners. 2056 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2057 if (MemCheckExp.isInsertedInstruction(&I)) 2058 continue; 2059 SE.forgetValue(&I); 2060 I.eraseFromParent(); 2061 } 2062 } 2063 MemCheckCleaner.cleanup(); 2064 SCEVCleaner.cleanup(); 2065 2066 if (SCEVCheckCond) 2067 SCEVCheckBlock->eraseFromParent(); 2068 if (MemRuntimeCheckCond) 2069 MemCheckBlock->eraseFromParent(); 2070 } 2071 2072 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2073 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2074 /// depending on the generated condition. 2075 BasicBlock *emitSCEVChecks(BasicBlock *Bypass, 2076 BasicBlock *LoopVectorPreHeader, 2077 BasicBlock *LoopExitBlock) { 2078 if (!SCEVCheckCond) 2079 return nullptr; 2080 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2081 if (C->isZero()) 2082 return nullptr; 2083 2084 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2085 2086 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2087 // Create new preheader for vector loop. 2088 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2089 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2090 2091 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2092 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2093 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2094 SCEVCheckBlock); 2095 2096 DT->addNewBlock(SCEVCheckBlock, Pred); 2097 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2098 2099 ReplaceInstWithInst( 2100 SCEVCheckBlock->getTerminator(), 2101 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2102 // Mark the check as used, to prevent it from being removed during cleanup. 2103 SCEVCheckCond = nullptr; 2104 return SCEVCheckBlock; 2105 } 2106 2107 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2108 /// the branches to branch to the vector preheader or \p Bypass, depending on 2109 /// the generated condition. 2110 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass, 2111 BasicBlock *LoopVectorPreHeader) { 2112 // Check if we generated code that checks in runtime if arrays overlap. 2113 if (!MemRuntimeCheckCond) 2114 return nullptr; 2115 2116 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2117 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2118 MemCheckBlock); 2119 2120 DT->addNewBlock(MemCheckBlock, Pred); 2121 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2122 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2123 2124 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2125 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2126 2127 ReplaceInstWithInst( 2128 MemCheckBlock->getTerminator(), 2129 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2130 MemCheckBlock->getTerminator()->setDebugLoc( 2131 Pred->getTerminator()->getDebugLoc()); 2132 2133 // Mark the check as used, to prevent it from being removed during cleanup. 
2134 MemRuntimeCheckCond = nullptr; 2135 return MemCheckBlock; 2136 } 2137 }; 2138 2139 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2140 // vectorization. The loop needs to be annotated with #pragma omp simd 2141 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2142 // vector length information is not provided, vectorization is not considered 2143 // explicit. Interleave hints are not allowed either. These limitations will be 2144 // relaxed in the future. 2145 // Please, note that we are currently forced to abuse the pragma 'clang 2146 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2147 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2148 // provides *explicit vectorization hints* (LV can bypass legal checks and 2149 // assume that vectorization is legal). However, both hints are implemented 2150 // using the same metadata (llvm.loop.vectorize, processed by 2151 // LoopVectorizeHints). This will be fixed in the future when the native IR 2152 // representation for pragma 'omp simd' is introduced. 2153 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2154 OptimizationRemarkEmitter *ORE) { 2155 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2156 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2157 2158 // Only outer loops with an explicit vectorization hint are supported. 2159 // Unannotated outer loops are ignored. 2160 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2161 return false; 2162 2163 Function *Fn = OuterLp->getHeader()->getParent(); 2164 if (!Hints.allowVectorization(Fn, OuterLp, 2165 true /*VectorizeOnlyWhenForced*/)) { 2166 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2167 return false; 2168 } 2169 2170 if (Hints.getInterleave() > 1) { 2171 // TODO: Interleave support is future work. 2172 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2173 "outer loops.\n"); 2174 Hints.emitRemarkWithHints(); 2175 return false; 2176 } 2177 2178 return true; 2179 } 2180 2181 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2182 OptimizationRemarkEmitter *ORE, 2183 SmallVectorImpl<Loop *> &V) { 2184 // Collect inner loops and outer loops without irreducible control flow. For 2185 // now, only collect outer loops that have explicit vectorization hints. If we 2186 // are stress testing the VPlan H-CFG construction, we collect the outermost 2187 // loop of every loop nest. 2188 if (L.isInnermost() || VPlanBuildStressTest || 2189 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2190 LoopBlocksRPO RPOT(&L); 2191 RPOT.perform(LI); 2192 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2193 V.push_back(&L); 2194 // TODO: Collect inner loops inside marked outer loops in case 2195 // vectorization fails for the outer loop. Do not invoke 2196 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2197 // already known to be reducible. We can use an inherited attribute for 2198 // that. 2199 return; 2200 } 2201 } 2202 for (Loop *InnerL : L) 2203 collectSupportedLoops(*InnerL, LI, ORE, V); 2204 } 2205 2206 namespace { 2207 2208 /// The LoopVectorize Pass. 
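/// This is the legacy pass manager wrapper around LoopVectorizePass; with the
/// new pass manager the same transformation runs as "loop-vectorize"
/// (e.g. `opt -passes=loop-vectorize`), while the legacy flag is
/// `-loop-vectorize`.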
2209 struct LoopVectorize : public FunctionPass { 2210 /// Pass identification, replacement for typeid 2211 static char ID; 2212 2213 LoopVectorizePass Impl; 2214 2215 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2216 bool VectorizeOnlyWhenForced = false) 2217 : FunctionPass(ID), 2218 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2219 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2220 } 2221 2222 bool runOnFunction(Function &F) override { 2223 if (skipFunction(F)) 2224 return false; 2225 2226 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2227 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2228 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2229 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2230 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2231 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2232 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2233 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2234 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2235 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2236 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2237 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2238 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2239 2240 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2241 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2242 2243 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2244 GetLAA, *ORE, PSI).MadeAnyChange; 2245 } 2246 2247 void getAnalysisUsage(AnalysisUsage &AU) const override { 2248 AU.addRequired<AssumptionCacheTracker>(); 2249 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2250 AU.addRequired<DominatorTreeWrapperPass>(); 2251 AU.addRequired<LoopInfoWrapperPass>(); 2252 AU.addRequired<ScalarEvolutionWrapperPass>(); 2253 AU.addRequired<TargetTransformInfoWrapperPass>(); 2254 AU.addRequired<AAResultsWrapperPass>(); 2255 AU.addRequired<LoopAccessLegacyAnalysis>(); 2256 AU.addRequired<DemandedBitsWrapperPass>(); 2257 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2258 AU.addRequired<InjectTLIMappingsLegacy>(); 2259 2260 // We currently do not preserve loopinfo/dominator analyses with outer loop 2261 // vectorization. Until this is addressed, mark these analyses as preserved 2262 // only for non-VPlan-native path. 2263 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2264 if (!EnableVPlanNativePath) { 2265 AU.addPreserved<LoopInfoWrapperPass>(); 2266 AU.addPreserved<DominatorTreeWrapperPass>(); 2267 } 2268 2269 AU.addPreserved<BasicAAWrapperPass>(); 2270 AU.addPreserved<GlobalsAAWrapperPass>(); 2271 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2272 } 2273 }; 2274 2275 } // end anonymous namespace 2276 2277 //===----------------------------------------------------------------------===// 2278 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2279 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2280 //===----------------------------------------------------------------------===// 2281 2282 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2283 // We need to place the broadcast of invariant variables outside the loop, 2284 // but only if it's proven safe to do so. Else, broadcast will be inside 2285 // vector loop body. 
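// For example (illustrative IR, fixed VF = 4): broadcasting a loop-invariant
// i32 %x that is safe to hoist typically ends up in the vector preheader as
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer
// The value names simply follow the "broadcast" prefix passed to
// CreateVectorSplat below; the exact instruction sequence is whatever the
// builder chooses to emit.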
2286 Instruction *Instr = dyn_cast<Instruction>(V); 2287 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2288 (!Instr || 2289 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2290 // Place the code for broadcasting invariant variables in the new preheader. 2291 IRBuilder<>::InsertPointGuard Guard(Builder); 2292 if (SafeToHoist) 2293 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2294 2295 // Broadcast the scalar into all locations in the vector. 2296 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2297 2298 return Shuf; 2299 } 2300 2301 /// This function adds 2302 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2303 /// to each vector element of Val. The sequence starts at StartIndex. 2304 /// \p Opcode is relevant for FP induction variable. 2305 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2306 Instruction::BinaryOps BinOp, ElementCount VF, 2307 IRBuilderBase &Builder) { 2308 assert(VF.isVector() && "only vector VFs are supported"); 2309 2310 // Create and check the types. 2311 auto *ValVTy = cast<VectorType>(Val->getType()); 2312 ElementCount VLen = ValVTy->getElementCount(); 2313 2314 Type *STy = Val->getType()->getScalarType(); 2315 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2316 "Induction Step must be an integer or FP"); 2317 assert(Step->getType() == STy && "Step has wrong type"); 2318 2319 SmallVector<Constant *, 8> Indices; 2320 2321 // Create a vector of consecutive numbers from zero to VF. 2322 VectorType *InitVecValVTy = ValVTy; 2323 if (STy->isFloatingPointTy()) { 2324 Type *InitVecValSTy = 2325 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2326 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2327 } 2328 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2329 2330 // Splat the StartIdx 2331 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2332 2333 if (STy->isIntegerTy()) { 2334 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2335 Step = Builder.CreateVectorSplat(VLen, Step); 2336 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2337 // FIXME: The newly created binary instructions should contain nsw/nuw 2338 // flags, which can be found from the original scalar operations. 2339 Step = Builder.CreateMul(InitVec, Step); 2340 return Builder.CreateAdd(Val, Step, "induction"); 2341 } 2342 2343 // Floating point induction. 2344 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2345 "Binary Opcode should be specified for FP induction"); 2346 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2347 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2348 2349 Step = Builder.CreateVectorSplat(VLen, Step); 2350 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2351 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2352 } 2353 2354 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2355 /// variable on which to base the steps, \p Step is the size of the step. 2356 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2357 const InductionDescriptor &ID, VPValue *Def, 2358 VPTransformState &State) { 2359 IRBuilderBase &Builder = State.Builder; 2360 // We shouldn't have to build scalar steps if we aren't vectorizing. 2361 assert(State.VF.isVector() && "VF should be greater than one"); 2362 // Get the value type and ensure it and the step have the same integer type. 
2363 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2364 assert(ScalarIVTy == Step->getType() && 2365 "Val and Step should have the same type"); 2366 2367 // We build scalar steps for both integer and floating-point induction 2368 // variables. Here, we determine the kind of arithmetic we will perform. 2369 Instruction::BinaryOps AddOp; 2370 Instruction::BinaryOps MulOp; 2371 if (ScalarIVTy->isIntegerTy()) { 2372 AddOp = Instruction::Add; 2373 MulOp = Instruction::Mul; 2374 } else { 2375 AddOp = ID.getInductionOpcode(); 2376 MulOp = Instruction::FMul; 2377 } 2378 2379 // Determine the number of scalars we need to generate for each unroll 2380 // iteration. 2381 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2382 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2383 // Compute the scalar steps and save the results in State. 2384 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2385 ScalarIVTy->getScalarSizeInBits()); 2386 Type *VecIVTy = nullptr; 2387 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2388 if (!FirstLaneOnly && State.VF.isScalable()) { 2389 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2390 UnitStepVec = 2391 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2392 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2393 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2394 } 2395 2396 for (unsigned Part = 0; Part < State.UF; ++Part) { 2397 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2398 2399 if (!FirstLaneOnly && State.VF.isScalable()) { 2400 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2401 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2402 if (ScalarIVTy->isFloatingPointTy()) 2403 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2404 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2405 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2406 State.set(Def, Add, Part); 2407 // It's useful to record the lane values too for the known minimum number 2408 // of elements so we do those below. This improves the code quality when 2409 // trying to extract the first element, for example. 2410 } 2411 2412 if (ScalarIVTy->isFloatingPointTy()) 2413 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2414 2415 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2416 Value *StartIdx = Builder.CreateBinOp( 2417 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2418 // The step returned by `createStepForVF` is a runtime-evaluated value 2419 // when VF is scalable. Otherwise, it should be folded into a Constant. 2420 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2421 "Expected StartIdx to be folded to a constant when VF is not " 2422 "scalable"); 2423 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2424 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2425 State.set(Def, Add, VPIteration(Part, Lane)); 2426 } 2427 } 2428 } 2429 2430 // Generate code for the induction step. 
Note that induction steps are
2431 // required to be loop-invariant.
2432 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2433 Instruction *InsertBefore,
2434 Loop *OrigLoop = nullptr) {
2435 const DataLayout &DL = SE.getDataLayout();
2436 assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2437 "Induction step should be loop invariant");
2438 if (auto *E = dyn_cast<SCEVUnknown>(Step))
2439 return E->getValue();
2440
2441 SCEVExpander Exp(SE, DL, "induction");
2442 return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2443 }
2444
2445 /// Compute the transformed value of Index at offset StartValue using step
2446 /// StepValue.
2447 /// For integer induction, returns StartValue + Index * StepValue.
2448 /// For pointer induction, returns StartValue[Index * StepValue].
2449 /// FIXME: The newly created binary instructions should contain nsw/nuw
2450 /// flags, which can be found from the original scalar operations.
2451 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2452 Value *StartValue, Value *Step,
2453 const InductionDescriptor &ID) {
2454 assert(Index->getType()->getScalarType() == Step->getType() &&
2455 "Index scalar type does not match StepValue type");
2456
2457 // Note: the IR at this point is broken. We cannot use SE to create any new
2458 // SCEV and then expand it, hoping that SCEV's simplification will give us
2459 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2460 // lead to various SCEV crashes. So all we can do is use the builder and rely
2461 // on InstCombine for future simplifications. Here we handle some trivial
2462 // cases only.
2463 auto CreateAdd = [&B](Value *X, Value *Y) {
2464 assert(X->getType() == Y->getType() && "Types don't match!");
2465 if (auto *CX = dyn_cast<ConstantInt>(X))
2466 if (CX->isZero())
2467 return Y;
2468 if (auto *CY = dyn_cast<ConstantInt>(Y))
2469 if (CY->isZero())
2470 return X;
2471 return B.CreateAdd(X, Y);
2472 };
2473
2474 // We allow X to be a vector type, in which case Y will potentially be
2475 // splatted into a vector with the same element count.
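// For example (illustrative values): with X = <4 x i64> <0, 1, 2, 3> and a
// scalar step Y = i64 3, Y is first splatted to <4 x i64> and the lane-wise
// product is <0, 3, 6, 9>. For the common IK_IntInduction case below this
// yields StartValue + Index * Step, e.g. Start = 10, Step = 3, Index = 2
// gives 16.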
2476 auto CreateMul = [&B](Value *X, Value *Y) { 2477 assert(X->getType()->getScalarType() == Y->getType() && 2478 "Types don't match!"); 2479 if (auto *CX = dyn_cast<ConstantInt>(X)) 2480 if (CX->isOne()) 2481 return Y; 2482 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2483 if (CY->isOne()) 2484 return X; 2485 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2486 if (XVTy && !isa<VectorType>(Y->getType())) 2487 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2488 return B.CreateMul(X, Y); 2489 }; 2490 2491 switch (ID.getKind()) { 2492 case InductionDescriptor::IK_IntInduction: { 2493 assert(!isa<VectorType>(Index->getType()) && 2494 "Vector indices not supported for integer inductions yet"); 2495 assert(Index->getType() == StartValue->getType() && 2496 "Index type does not match StartValue type"); 2497 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2498 return B.CreateSub(StartValue, Index); 2499 auto *Offset = CreateMul(Index, Step); 2500 return CreateAdd(StartValue, Offset); 2501 } 2502 case InductionDescriptor::IK_PtrInduction: { 2503 assert(isa<Constant>(Step) && 2504 "Expected constant step for pointer induction"); 2505 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2506 } 2507 case InductionDescriptor::IK_FpInduction: { 2508 assert(!isa<VectorType>(Index->getType()) && 2509 "Vector indices not supported for FP inductions yet"); 2510 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2511 auto InductionBinOp = ID.getInductionBinOp(); 2512 assert(InductionBinOp && 2513 (InductionBinOp->getOpcode() == Instruction::FAdd || 2514 InductionBinOp->getOpcode() == Instruction::FSub) && 2515 "Original bin op should be defined for FP induction"); 2516 2517 Value *MulExp = B.CreateFMul(Step, Index); 2518 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2519 "induction"); 2520 } 2521 case InductionDescriptor::IK_NoInduction: 2522 return nullptr; 2523 } 2524 llvm_unreachable("invalid enum"); 2525 } 2526 2527 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2528 const VPIteration &Instance, 2529 VPTransformState &State) { 2530 Value *ScalarInst = State.get(Def, Instance); 2531 Value *VectorValue = State.get(Def, Instance.Part); 2532 VectorValue = Builder.CreateInsertElement( 2533 VectorValue, ScalarInst, 2534 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2535 State.set(Def, VectorValue, Instance.Part); 2536 } 2537 2538 // Return whether we allow using masked interleave-groups (for dealing with 2539 // strided loads/stores that reside in predicated blocks, or for dealing 2540 // with gaps). 2541 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2542 // If an override option has been passed in for interleaved accesses, use it. 2543 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2544 return EnableMaskedInterleavedMemAccesses; 2545 2546 return TTI.enableMaskedInterleavedAccessVectorization(); 2547 } 2548 2549 // Try to vectorize the interleave group that \p Instr belongs to. 2550 // 2551 // E.g. Translate following interleaved load group (factor = 3): 2552 // for (i = 0; i < N; i+=3) { 2553 // R = Pic[i]; // Member of index 0 2554 // G = Pic[i+1]; // Member of index 1 2555 // B = Pic[i+2]; // Member of index 2 2556 // ... 
// do something to R, G, B 2557 // } 2558 // To: 2559 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2560 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2561 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2562 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2563 // 2564 // Or translate following interleaved store group (factor = 3): 2565 // for (i = 0; i < N; i+=3) { 2566 // ... do something to R, G, B 2567 // Pic[i] = R; // Member of index 0 2568 // Pic[i+1] = G; // Member of index 1 2569 // Pic[i+2] = B; // Member of index 2 2570 // } 2571 // To: 2572 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2573 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2574 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2575 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2576 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2577 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2578 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2579 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2580 VPValue *BlockInMask) { 2581 Instruction *Instr = Group->getInsertPos(); 2582 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2583 2584 // Prepare for the vector type of the interleaved load/store. 2585 Type *ScalarTy = getLoadStoreType(Instr); 2586 unsigned InterleaveFactor = Group->getFactor(); 2587 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2588 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2589 2590 // Prepare for the new pointers. 2591 SmallVector<Value *, 2> AddrParts; 2592 unsigned Index = Group->getIndex(Instr); 2593 2594 // TODO: extend the masked interleaved-group support to reversed access. 2595 assert((!BlockInMask || !Group->isReverse()) && 2596 "Reversed masked interleave-group not supported."); 2597 2598 // If the group is reverse, adjust the index to refer to the last vector lane 2599 // instead of the first. We adjust the index from the first vector lane, 2600 // rather than directly getting the pointer for lane VF - 1, because the 2601 // pointer operand of the interleaved access is supposed to be uniform. For 2602 // uniform instructions, we're only required to generate a value for the 2603 // first vector lane in each unroll iteration. 2604 if (Group->isReverse()) 2605 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2606 2607 for (unsigned Part = 0; Part < UF; Part++) { 2608 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2609 setDebugLocFromInst(AddrPart); 2610 2611 // Notice current instruction could be any index. Need to adjust the address 2612 // to the member of index 0. 2613 // 2614 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2615 // b = A[i]; // Member of index 0 2616 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2617 // 2618 // E.g. A[i+1] = a; // Member of index 1 2619 // A[i] = b; // Member of index 0 2620 // A[i+2] = c; // Member of index 2 (Current instruction) 2621 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2622 2623 bool InBounds = false; 2624 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2625 InBounds = gep->isInBounds(); 2626 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2627 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2628 2629 // Cast to the vector pointer type. 
2630 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2631 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2632 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2633 } 2634 2635 setDebugLocFromInst(Instr); 2636 Value *PoisonVec = PoisonValue::get(VecTy); 2637 2638 Value *MaskForGaps = nullptr; 2639 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2640 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2641 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2642 } 2643 2644 // Vectorize the interleaved load group. 2645 if (isa<LoadInst>(Instr)) { 2646 // For each unroll part, create a wide load for the group. 2647 SmallVector<Value *, 2> NewLoads; 2648 for (unsigned Part = 0; Part < UF; Part++) { 2649 Instruction *NewLoad; 2650 if (BlockInMask || MaskForGaps) { 2651 assert(useMaskedInterleavedAccesses(*TTI) && 2652 "masked interleaved groups are not allowed."); 2653 Value *GroupMask = MaskForGaps; 2654 if (BlockInMask) { 2655 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2656 Value *ShuffledMask = Builder.CreateShuffleVector( 2657 BlockInMaskPart, 2658 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2659 "interleaved.mask"); 2660 GroupMask = MaskForGaps 2661 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2662 MaskForGaps) 2663 : ShuffledMask; 2664 } 2665 NewLoad = 2666 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2667 GroupMask, PoisonVec, "wide.masked.vec"); 2668 } 2669 else 2670 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2671 Group->getAlign(), "wide.vec"); 2672 Group->addMetadata(NewLoad); 2673 NewLoads.push_back(NewLoad); 2674 } 2675 2676 // For each member in the group, shuffle out the appropriate data from the 2677 // wide loads. 2678 unsigned J = 0; 2679 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2680 Instruction *Member = Group->getMember(I); 2681 2682 // Skip the gaps in the group. 2683 if (!Member) 2684 continue; 2685 2686 auto StrideMask = 2687 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2688 for (unsigned Part = 0; Part < UF; Part++) { 2689 Value *StridedVec = Builder.CreateShuffleVector( 2690 NewLoads[Part], StrideMask, "strided.vec"); 2691 2692 // If this member has different type, cast the result type. 2693 if (Member->getType() != ScalarTy) { 2694 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2695 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2696 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2697 } 2698 2699 if (Group->isReverse()) 2700 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2701 2702 State.set(VPDefs[J], StridedVec, Part); 2703 } 2704 ++J; 2705 } 2706 return; 2707 } 2708 2709 // The sub vector type for current instruction. 2710 auto *SubVT = VectorType::get(ScalarTy, VF); 2711 2712 // Vectorize the interleaved store group. 2713 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2714 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2715 "masked interleaved groups are not allowed."); 2716 assert((!MaskForGaps || !VF.isScalable()) && 2717 "masking gaps for scalable vectors is not yet supported."); 2718 for (unsigned Part = 0; Part < UF; Part++) { 2719 // Collect the stored vector from each member. 
2720 SmallVector<Value *, 4> StoredVecs; 2721 for (unsigned i = 0; i < InterleaveFactor; i++) { 2722 assert((Group->getMember(i) || MaskForGaps) && 2723 "Fail to get a member from an interleaved store group"); 2724 Instruction *Member = Group->getMember(i); 2725 2726 // Skip the gaps in the group. 2727 if (!Member) { 2728 Value *Undef = PoisonValue::get(SubVT); 2729 StoredVecs.push_back(Undef); 2730 continue; 2731 } 2732 2733 Value *StoredVec = State.get(StoredValues[i], Part); 2734 2735 if (Group->isReverse()) 2736 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2737 2738 // If this member has different type, cast it to a unified type. 2739 2740 if (StoredVec->getType() != SubVT) 2741 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2742 2743 StoredVecs.push_back(StoredVec); 2744 } 2745 2746 // Concatenate all vectors into a wide vector. 2747 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2748 2749 // Interleave the elements in the wide vector. 2750 Value *IVec = Builder.CreateShuffleVector( 2751 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2752 "interleaved.vec"); 2753 2754 Instruction *NewStoreInstr; 2755 if (BlockInMask || MaskForGaps) { 2756 Value *GroupMask = MaskForGaps; 2757 if (BlockInMask) { 2758 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2759 Value *ShuffledMask = Builder.CreateShuffleVector( 2760 BlockInMaskPart, 2761 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2762 "interleaved.mask"); 2763 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2764 ShuffledMask, MaskForGaps) 2765 : ShuffledMask; 2766 } 2767 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2768 Group->getAlign(), GroupMask); 2769 } else 2770 NewStoreInstr = 2771 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2772 2773 Group->addMetadata(NewStoreInstr); 2774 } 2775 } 2776 2777 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2778 VPReplicateRecipe *RepRecipe, 2779 const VPIteration &Instance, 2780 bool IfPredicateInstr, 2781 VPTransformState &State) { 2782 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2783 2784 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2785 // the first lane and part. 2786 if (isa<NoAliasScopeDeclInst>(Instr)) 2787 if (!Instance.isFirstIteration()) 2788 return; 2789 2790 setDebugLocFromInst(Instr); 2791 2792 // Does this instruction return a value ? 2793 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2794 2795 Instruction *Cloned = Instr->clone(); 2796 if (!IsVoidRetTy) 2797 Cloned->setName(Instr->getName() + ".cloned"); 2798 2799 // If the scalarized instruction contributes to the address computation of a 2800 // widen masked load/store which was in a basic block that needed predication 2801 // and is not predicated after vectorization, we can't propagate 2802 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2803 // instruction could feed a poison value to the base address of the widen 2804 // load/store. 2805 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2806 Cloned->dropPoisonGeneratingFlags(); 2807 2808 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 2809 Builder.GetInsertPoint()); 2810 // Replace the operands of the cloned instructions with their scalar 2811 // equivalents in the new loop. 
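  // Illustrative note (added commentary): an operand defined by a uniform
  // replicate recipe (e.g. a loop-invariant base pointer) is read from the
  // first lane of the requested part, while every other operand is read from
  // the exact (part, lane) instance being cloned; the loop below makes that
  // choice per operand.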
2812 for (auto &I : enumerate(RepRecipe->operands())) { 2813 auto InputInstance = Instance; 2814 VPValue *Operand = I.value(); 2815 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2816 if (OperandR && OperandR->isUniform()) 2817 InputInstance.Lane = VPLane::getFirstLane(); 2818 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2819 } 2820 addNewMetadata(Cloned, Instr); 2821 2822 // Place the cloned scalar in the new loop. 2823 Builder.Insert(Cloned); 2824 2825 State.set(RepRecipe, Cloned, Instance); 2826 2827 // If we just cloned a new assumption, add it the assumption cache. 2828 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2829 AC->registerAssumption(II); 2830 2831 // End if-block. 2832 if (IfPredicateInstr) 2833 PredicatedInstructions.push_back(Cloned); 2834 } 2835 2836 void InnerLoopVectorizer::createHeaderBranch(Loop *L) { 2837 BasicBlock *Header = L->getHeader(); 2838 assert(!L->getLoopLatch() && "loop should not have a latch at this point"); 2839 2840 IRBuilder<> B(Header->getTerminator()); 2841 Instruction *OldInst = 2842 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 2843 setDebugLocFromInst(OldInst, &B); 2844 2845 // Connect the header to the exit and header blocks and replace the old 2846 // terminator. 2847 B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header); 2848 2849 // Now we have two terminators. Remove the old one from the block. 2850 Header->getTerminator()->eraseFromParent(); 2851 } 2852 2853 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2854 if (TripCount) 2855 return TripCount; 2856 2857 assert(InsertBlock); 2858 IRBuilder<> Builder(InsertBlock->getTerminator()); 2859 // Find the loop boundaries. 2860 ScalarEvolution *SE = PSE.getSE(); 2861 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2862 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2863 "Invalid loop count"); 2864 2865 Type *IdxTy = Legal->getWidestInductionType(); 2866 assert(IdxTy && "No type for induction"); 2867 2868 // The exit count might have the type of i64 while the phi is i32. This can 2869 // happen if we have an induction variable that is sign extended before the 2870 // compare. The only way that we get a backedge taken count is that the 2871 // induction variable was signed and as such will not overflow. In such a case 2872 // truncation is legal. 2873 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2874 IdxTy->getPrimitiveSizeInBits()) 2875 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2876 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2877 2878 // Get the total trip count from the count by adding 1. 2879 const SCEV *ExitCount = SE->getAddExpr( 2880 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2881 2882 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2883 2884 // Expand the trip count and place the new instructions in the preheader. 2885 // Notice that the pre-header does not change, only the loop body. 2886 SCEVExpander Exp(*SE, DL, "induction"); 2887 2888 // Count holds the overall loop count (N). 
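  // Illustrative example (hypothetical loop, not from the original comments):
  // for
  //   for (i32 i = 0; i != n; ++i) { ... }
  // with n > 0, the backedge-taken count is (n - 1), so the expansion below
  // materializes ((n - 1) + 1), i.e. n, right before the insert block's
  // terminator.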
2889 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2890 InsertBlock->getTerminator()); 2891 2892 if (TripCount->getType()->isPointerTy()) 2893 TripCount = 2894 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2895 InsertBlock->getTerminator()); 2896 2897 return TripCount; 2898 } 2899 2900 Value * 2901 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2902 if (VectorTripCount) 2903 return VectorTripCount; 2904 2905 Value *TC = getOrCreateTripCount(InsertBlock); 2906 IRBuilder<> Builder(InsertBlock->getTerminator()); 2907 2908 Type *Ty = TC->getType(); 2909 // This is where we can make the step a runtime constant. 2910 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2911 2912 // If the tail is to be folded by masking, round the number of iterations N 2913 // up to a multiple of Step instead of rounding down. This is done by first 2914 // adding Step-1 and then rounding down. Note that it's ok if this addition 2915 // overflows: the vector induction variable will eventually wrap to zero given 2916 // that it starts at zero and its Step is a power of two; the loop will then 2917 // exit, with the last early-exit vector comparison also producing all-true. 2918 if (Cost->foldTailByMasking()) { 2919 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2920 "VF*UF must be a power of 2 when folding tail by masking"); 2921 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2922 TC = Builder.CreateAdd( 2923 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2924 } 2925 2926 // Now we need to generate the expression for the part of the loop that the 2927 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2928 // iterations are not required for correctness, or N - Step, otherwise. Step 2929 // is equal to the vectorization factor (number of SIMD elements) times the 2930 // unroll factor (number of SIMD instructions). 2931 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2932 2933 // There are cases where we *must* run at least one iteration in the remainder 2934 // loop. See the cost model for when this can happen. If the step evenly 2935 // divides the trip count, we set the remainder to be equal to the step. If 2936 // the step does not evenly divide the trip count, no adjustment is necessary 2937 // since there will already be scalar iterations. Note that the minimum 2938 // iterations check ensures that N >= Step. 2939 if (Cost->requiresScalarEpilogue(VF)) { 2940 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2941 R = Builder.CreateSelect(IsZero, Step, R); 2942 } 2943 2944 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2945 2946 return VectorTripCount; 2947 } 2948 2949 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2950 const DataLayout &DL) { 2951 // Verify that V is a vector type with same number of elements as DstVTy. 2952 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2953 unsigned VF = DstFVTy->getNumElements(); 2954 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2955 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2956 Type *SrcElemTy = SrcVecTy->getElementType(); 2957 Type *DstElemTy = DstFVTy->getElementType(); 2958 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2959 "Vector elements must have same size"); 2960 2961 // Do a direct cast if element types are castable. 
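  // Illustrative note (hypothetical types, assuming 64-bit pointers): a
  // <4 x i64> -> <4 x i8*> request is a no-op pointer cast and is handled by
  // the direct cast below, whereas <4 x double> -> <4 x i8*> is not directly
  // castable and falls through to the two-step double -> i64 -> i8* path
  // further down.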
2962 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2963 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2964 } 2965 // V cannot be directly casted to desired vector type. 2966 // May happen when V is a floating point vector but DstVTy is a vector of 2967 // pointers or vice-versa. Handle this using a two-step bitcast using an 2968 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2969 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2970 "Only one type should be a pointer type"); 2971 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2972 "Only one type should be a floating point type"); 2973 Type *IntTy = 2974 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2975 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2976 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2977 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2978 } 2979 2980 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) { 2981 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2982 // Reuse existing vector loop preheader for TC checks. 2983 // Note that new preheader block is generated for vector loop. 2984 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2985 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2986 2987 // Generate code to check if the loop's trip count is less than VF * UF, or 2988 // equal to it in case a scalar epilogue is required; this implies that the 2989 // vector trip count is zero. This check also covers the case where adding one 2990 // to the backedge-taken count overflowed leading to an incorrect trip count 2991 // of zero. In this case we will also jump to the scalar loop. 2992 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2993 : ICmpInst::ICMP_ULT; 2994 2995 // If tail is to be folded, vector loop takes care of all iterations. 2996 Value *CheckMinIters = Builder.getFalse(); 2997 if (!Cost->foldTailByMasking()) { 2998 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 2999 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3000 } 3001 // Create new preheader for vector loop. 3002 LoopVectorPreHeader = 3003 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3004 "vector.ph"); 3005 3006 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3007 DT->getNode(Bypass)->getIDom()) && 3008 "TC check is expected to dominate Bypass"); 3009 3010 // Update dominator for Bypass & LoopExit (if needed). 3011 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3012 if (!Cost->requiresScalarEpilogue(VF)) 3013 // If there is an epilogue which must run, there's no edge from the 3014 // middle block to exit blocks and thus no need to update the immediate 3015 // dominator of the exit blocks. 
3016 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3017 3018 ReplaceInstWithInst( 3019 TCCheckBlock->getTerminator(), 3020 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3021 LoopBypassBlocks.push_back(TCCheckBlock); 3022 } 3023 3024 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3025 3026 BasicBlock *const SCEVCheckBlock = 3027 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3028 if (!SCEVCheckBlock) 3029 return nullptr; 3030 3031 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3032 (OptForSizeBasedOnProfile && 3033 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3034 "Cannot SCEV check stride or overflow when optimizing for size"); 3035 3036 3037 // Update dominator only if this is first RT check. 3038 if (LoopBypassBlocks.empty()) { 3039 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3040 if (!Cost->requiresScalarEpilogue(VF)) 3041 // If there is an epilogue which must run, there's no edge from the 3042 // middle block to exit blocks and thus no need to update the immediate 3043 // dominator of the exit blocks. 3044 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3045 } 3046 3047 LoopBypassBlocks.push_back(SCEVCheckBlock); 3048 AddedSafetyChecks = true; 3049 return SCEVCheckBlock; 3050 } 3051 3052 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) { 3053 // VPlan-native path does not do any analysis for runtime checks currently. 3054 if (EnableVPlanNativePath) 3055 return nullptr; 3056 3057 BasicBlock *const MemCheckBlock = 3058 RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader); 3059 3060 // Check if we generated code that checks in runtime if arrays overlap. We put 3061 // the checks into a separate block to make the more common case of few 3062 // elements faster. 3063 if (!MemCheckBlock) 3064 return nullptr; 3065 3066 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3067 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3068 "Cannot emit memory checks when optimizing for size, unless forced " 3069 "to vectorize."); 3070 ORE->emit([&]() { 3071 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3072 OrigLoop->getStartLoc(), 3073 OrigLoop->getHeader()) 3074 << "Code-size may be reduced by not forcing " 3075 "vectorization, or by source-code modifications " 3076 "eliminating the need for runtime checks " 3077 "(e.g., adding 'restrict')."; 3078 }); 3079 } 3080 3081 LoopBypassBlocks.push_back(MemCheckBlock); 3082 3083 AddedSafetyChecks = true; 3084 3085 // We currently don't use LoopVersioning for the actual loop cloning but we 3086 // still use it to add the noalias metadata. 
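  // Illustrative note (added commentary): in the vector path the runtime
  // checks emitted above guarantee the checked pointer groups do not overlap,
  // so LoopVersioning is used below only to tag the loop's memory accesses
  // with !alias.scope / !noalias metadata, letting later passes treat the
  // checked accesses as independent.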
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
      BranchInst::Create(LoopScalarPreHeader) :
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
                         Builder.getTrue());
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  BasicBlock *LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
3147 if (ParentLoop) { 3148 ParentLoop->addChildLoop(Lp); 3149 } else { 3150 LI->addTopLevelLoop(Lp); 3151 } 3152 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3153 return Lp; 3154 } 3155 3156 void InnerLoopVectorizer::createInductionResumeValues( 3157 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3158 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3159 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3160 "Inconsistent information about additional bypass."); 3161 3162 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3163 assert(VectorTripCount && "Expected valid arguments"); 3164 // We are going to resume the execution of the scalar loop. 3165 // Go over all of the induction variables that we found and fix the 3166 // PHIs that are left in the scalar version of the loop. 3167 // The starting values of PHI nodes depend on the counter of the last 3168 // iteration in the vectorized loop. 3169 // If we come from a bypass edge then we need to start from the original 3170 // start value. 3171 Instruction *OldInduction = Legal->getPrimaryInduction(); 3172 for (auto &InductionEntry : Legal->getInductionVars()) { 3173 PHINode *OrigPhi = InductionEntry.first; 3174 InductionDescriptor II = InductionEntry.second; 3175 3176 // Create phi nodes to merge from the backedge-taken check block. 3177 PHINode *BCResumeVal = 3178 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3179 LoopScalarPreHeader->getTerminator()); 3180 // Copy original phi DL over to the new one. 3181 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3182 Value *&EndValue = IVEndValues[OrigPhi]; 3183 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3184 if (OrigPhi == OldInduction) { 3185 // We know what the end value is. 3186 EndValue = VectorTripCount; 3187 } else { 3188 IRBuilder<> B(LoopVectorPreHeader->getTerminator()); 3189 3190 // Fast-math-flags propagate from the original induction instruction. 3191 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3192 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3193 3194 Type *StepType = II.getStep()->getType(); 3195 Instruction::CastOps CastOp = 3196 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3197 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3198 Value *Step = 3199 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3200 EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3201 EndValue->setName("ind.end"); 3202 3203 // Compute the end value for the additional bypass (if applicable). 3204 if (AdditionalBypass.first) { 3205 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3206 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3207 StepType, true); 3208 Value *Step = 3209 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3210 CRD = 3211 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3212 EndValueFromAdditionalBypass = 3213 emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3214 EndValueFromAdditionalBypass->setName("ind.end"); 3215 } 3216 } 3217 // The new PHI merges the original incoming value, in case of a bypass, 3218 // or the value at the end of the vectorized loop. 3219 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3220 3221 // Fix the scalar body counter (PHI node). 3222 // The old induction's phi node in the scalar body needs the truncated 3223 // value. 
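    // Illustrative example (hypothetical numbers): with an original trip
    // count of 13 and VF * UF = 8, the vector trip count is 8, so the
    // canonical induction starting at 0 resumes at 8 when arriving from the
    // middle block, and at its original start value 0 when arriving from a
    // bypass block (edges added below).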
3224 for (BasicBlock *BB : LoopBypassBlocks) 3225 BCResumeVal->addIncoming(II.getStartValue(), BB); 3226 3227 if (AdditionalBypass.first) 3228 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3229 EndValueFromAdditionalBypass); 3230 3231 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3232 } 3233 } 3234 3235 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3236 // The trip counts should be cached by now. 3237 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3238 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3239 3240 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3241 3242 // Add a check in the middle block to see if we have completed 3243 // all of the iterations in the first vector loop. Three cases: 3244 // 1) If we require a scalar epilogue, there is no conditional branch as 3245 // we unconditionally branch to the scalar preheader. Do nothing. 3246 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3247 // Thus if tail is to be folded, we know we don't need to run the 3248 // remainder and we can use the previous value for the condition (true). 3249 // 3) Otherwise, construct a runtime check. 3250 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3251 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3252 Count, VectorTripCount, "cmp.n", 3253 LoopMiddleBlock->getTerminator()); 3254 3255 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3256 // of the corresponding compare because they may have ended up with 3257 // different line numbers and we want to avoid awkward line stepping while 3258 // debugging. Eg. if the compare has got a line number inside the loop. 3259 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3260 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3261 } 3262 3263 #ifdef EXPENSIVE_CHECKS 3264 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3265 LI->verify(*DT); 3266 #endif 3267 3268 return LoopVectorPreHeader; 3269 } 3270 3271 std::pair<BasicBlock *, Value *> 3272 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3273 /* 3274 In this function we generate a new loop. The new loop will contain 3275 the vectorized instructions while the old loop will continue to run the 3276 scalar remainder. 3277 3278 [ ] <-- loop iteration number check. 3279 / | 3280 / v 3281 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3282 | / | 3283 | / v 3284 || [ ] <-- vector pre header. 3285 |/ | 3286 | v 3287 | [ ] \ 3288 | [ ]_| <-- vector loop. 3289 | | 3290 | v 3291 \ -[ ] <--- middle-block. 3292 \/ | 3293 /\ v 3294 | ->[ ] <--- new preheader. 3295 | | 3296 (opt) v <-- edge from middle to exit iff epilogue is not required. 3297 | [ ] \ 3298 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3299 \ | 3300 \ v 3301 >[ ] <-- exit block(s). 3302 ... 3303 */ 3304 3305 // Get the metadata of the original loop before it gets modified. 3306 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3307 3308 // Workaround! Compute the trip count of the original loop and cache it 3309 // before we start modifying the CFG. This code has a systemic problem 3310 // wherein it tries to run analysis over partially constructed IR; this is 3311 // wrong, and not simply for SCEV. The trip count of the original loop 3312 // simply happens to be prone to hitting this in practice. 
In theory, we 3313 // can hit the same issue for any SCEV, or ValueTracking query done during 3314 // mutation. See PR49900. 3315 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3316 3317 // Create an empty vector loop, and prepare basic blocks for the runtime 3318 // checks. 3319 Loop *Lp = createVectorLoopSkeleton(""); 3320 3321 // Now, compare the new count to zero. If it is zero skip the vector loop and 3322 // jump to the scalar loop. This check also covers the case where the 3323 // backedge-taken count is uint##_max: adding one to it will overflow leading 3324 // to an incorrect trip count of zero. In this (rare) case we will also jump 3325 // to the scalar loop. 3326 emitMinimumIterationCountCheck(LoopScalarPreHeader); 3327 3328 // Generate the code to check any assumptions that we've made for SCEV 3329 // expressions. 3330 emitSCEVChecks(LoopScalarPreHeader); 3331 3332 // Generate the code that checks in runtime if arrays overlap. We put the 3333 // checks into a separate block to make the more common case of few elements 3334 // faster. 3335 emitMemRuntimeChecks(LoopScalarPreHeader); 3336 3337 createHeaderBranch(Lp); 3338 3339 // Emit phis for the new starting index of the scalar loop. 3340 createInductionResumeValues(); 3341 3342 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3343 } 3344 3345 // Fix up external users of the induction variable. At this point, we are 3346 // in LCSSA form, with all external PHIs that use the IV having one input value, 3347 // coming from the remainder loop. We need those PHIs to also have a correct 3348 // value for the IV when arriving directly from the middle block. 3349 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3350 const InductionDescriptor &II, 3351 Value *CountRoundDown, Value *EndValue, 3352 BasicBlock *MiddleBlock, 3353 BasicBlock *VectorHeader) { 3354 // There are two kinds of external IV usages - those that use the value 3355 // computed in the last iteration (the PHI) and those that use the penultimate 3356 // value (the value that feeds into the phi from the loop latch). 3357 // We allow both, but they, obviously, have different values. 3358 3359 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3360 3361 DenseMap<Value *, Value *> MissingVals; 3362 3363 // An external user of the last iteration's value should see the value that 3364 // the remainder loop uses to initialize its own IV. 3365 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3366 for (User *U : PostInc->users()) { 3367 Instruction *UI = cast<Instruction>(U); 3368 if (!OrigLoop->contains(UI)) { 3369 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3370 MissingVals[UI] = EndValue; 3371 } 3372 } 3373 3374 // An external user of the penultimate value need to see EndValue - Step. 3375 // The simplest way to get this is to recompute it from the constituent SCEVs, 3376 // that is Start + (Step * (CRD - 1)). 3377 for (User *U : OrigPhi->users()) { 3378 auto *UI = cast<Instruction>(U); 3379 if (!OrigLoop->contains(UI)) { 3380 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3381 3382 IRBuilder<> B(MiddleBlock->getTerminator()); 3383 3384 // Fast-math-flags propagate from the original induction instruction. 
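      // Illustrative note (hypothetical IV): for a floating-point induction
      // such as f += 0.5f whose fadd carries 'fast', copying those flags onto
      // the builder below lets the Start + Step * (CRD - 1) recomputation
      // fold under the same FP semantics as the original update.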
3385 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3386 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3387 3388 Value *CountMinusOne = B.CreateSub( 3389 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3390 Value *CMO = 3391 !II.getStep()->getType()->isIntegerTy() 3392 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3393 II.getStep()->getType()) 3394 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3395 CMO->setName("cast.cmo"); 3396 3397 Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(), 3398 VectorHeader->getTerminator()); 3399 Value *Escape = 3400 emitTransformedIndex(B, CMO, II.getStartValue(), Step, II); 3401 Escape->setName("ind.escape"); 3402 MissingVals[UI] = Escape; 3403 } 3404 } 3405 3406 for (auto &I : MissingVals) { 3407 PHINode *PHI = cast<PHINode>(I.first); 3408 // One corner case we have to handle is two IVs "chasing" each-other, 3409 // that is %IV2 = phi [...], [ %IV1, %latch ] 3410 // In this case, if IV1 has an external use, we need to avoid adding both 3411 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3412 // don't already have an incoming value for the middle block. 3413 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3414 PHI->addIncoming(I.second, MiddleBlock); 3415 } 3416 } 3417 3418 namespace { 3419 3420 struct CSEDenseMapInfo { 3421 static bool canHandle(const Instruction *I) { 3422 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3423 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3424 } 3425 3426 static inline Instruction *getEmptyKey() { 3427 return DenseMapInfo<Instruction *>::getEmptyKey(); 3428 } 3429 3430 static inline Instruction *getTombstoneKey() { 3431 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3432 } 3433 3434 static unsigned getHashValue(const Instruction *I) { 3435 assert(canHandle(I) && "Unknown instruction!"); 3436 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3437 I->value_op_end())); 3438 } 3439 3440 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3441 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3442 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3443 return LHS == RHS; 3444 return LHS->isIdenticalTo(RHS); 3445 } 3446 }; 3447 3448 } // end anonymous namespace 3449 3450 ///Perform cse of induction variable instructions. 3451 static void cse(BasicBlock *BB) { 3452 // Perform simple cse. 3453 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3454 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3455 if (!CSEDenseMapInfo::canHandle(&In)) 3456 continue; 3457 3458 // Check if we can replace this instruction with any of the 3459 // visited instructions. 3460 if (Instruction *V = CSEMap.lookup(&In)) { 3461 In.replaceAllUsesWith(V); 3462 In.eraseFromParent(); 3463 continue; 3464 } 3465 3466 CSEMap[&In] = &In; 3467 } 3468 } 3469 3470 InstructionCost 3471 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3472 bool &NeedToScalarize) const { 3473 Function *F = CI->getCalledFunction(); 3474 Type *ScalarRetTy = CI->getType(); 3475 SmallVector<Type *, 4> Tys, ScalarTys; 3476 for (auto &ArgOp : CI->args()) 3477 ScalarTys.push_back(ArgOp->getType()); 3478 3479 // Estimate cost of scalarized vector call. 
The source operands are assumed 3480 // to be vectors, so we need to extract individual elements from there, 3481 // execute VF scalar calls, and then gather the result into the vector return 3482 // value. 3483 InstructionCost ScalarCallCost = 3484 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3485 if (VF.isScalar()) 3486 return ScalarCallCost; 3487 3488 // Compute corresponding vector type for return value and arguments. 3489 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3490 for (Type *ScalarTy : ScalarTys) 3491 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3492 3493 // Compute costs of unpacking argument values for the scalar calls and 3494 // packing the return values to a vector. 3495 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3496 3497 InstructionCost Cost = 3498 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3499 3500 // If we can't emit a vector call for this function, then the currently found 3501 // cost is the cost we need to return. 3502 NeedToScalarize = true; 3503 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3504 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3505 3506 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3507 return Cost; 3508 3509 // If the corresponding vector cost is cheaper, return its cost. 3510 InstructionCost VectorCallCost = 3511 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3512 if (VectorCallCost < Cost) { 3513 NeedToScalarize = false; 3514 Cost = VectorCallCost; 3515 } 3516 return Cost; 3517 } 3518 3519 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3520 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3521 return Elt; 3522 return VectorType::get(Elt, VF); 3523 } 3524 3525 InstructionCost 3526 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3527 ElementCount VF) const { 3528 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3529 assert(ID && "Expected intrinsic call!"); 3530 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3531 FastMathFlags FMF; 3532 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3533 FMF = FPMO->getFastMathFlags(); 3534 3535 SmallVector<const Value *> Arguments(CI->args()); 3536 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3537 SmallVector<Type *> ParamTys; 3538 std::transform(FTy->param_begin(), FTy->param_end(), 3539 std::back_inserter(ParamTys), 3540 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3541 3542 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3543 dyn_cast<IntrinsicInst>(CI)); 3544 return TTI.getIntrinsicInstrCost(CostAttrs, 3545 TargetTransformInfo::TCK_RecipThroughput); 3546 } 3547 3548 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3549 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3550 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3551 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3552 } 3553 3554 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3555 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3556 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3557 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3558 } 3559 3560 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3561 // For every instruction `I` in MinBWs, truncate the operands, create a 3562 // truncated version of `I` and reextend its result. 
InstCombine runs 3563 // later and will remove any ext/trunc pairs. 3564 SmallPtrSet<Value *, 4> Erased; 3565 for (const auto &KV : Cost->getMinimalBitwidths()) { 3566 // If the value wasn't vectorized, we must maintain the original scalar 3567 // type. The absence of the value from State indicates that it 3568 // wasn't vectorized. 3569 // FIXME: Should not rely on getVPValue at this point. 3570 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3571 if (!State.hasAnyVectorValue(Def)) 3572 continue; 3573 for (unsigned Part = 0; Part < UF; ++Part) { 3574 Value *I = State.get(Def, Part); 3575 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3576 continue; 3577 Type *OriginalTy = I->getType(); 3578 Type *ScalarTruncatedTy = 3579 IntegerType::get(OriginalTy->getContext(), KV.second); 3580 auto *TruncatedTy = VectorType::get( 3581 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3582 if (TruncatedTy == OriginalTy) 3583 continue; 3584 3585 IRBuilder<> B(cast<Instruction>(I)); 3586 auto ShrinkOperand = [&](Value *V) -> Value * { 3587 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3588 if (ZI->getSrcTy() == TruncatedTy) 3589 return ZI->getOperand(0); 3590 return B.CreateZExtOrTrunc(V, TruncatedTy); 3591 }; 3592 3593 // The actual instruction modification depends on the instruction type, 3594 // unfortunately. 3595 Value *NewI = nullptr; 3596 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3597 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3598 ShrinkOperand(BO->getOperand(1))); 3599 3600 // Any wrapping introduced by shrinking this operation shouldn't be 3601 // considered undefined behavior. So, we can't unconditionally copy 3602 // arithmetic wrapping flags to NewI. 3603 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3604 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3605 NewI = 3606 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3607 ShrinkOperand(CI->getOperand(1))); 3608 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3609 NewI = B.CreateSelect(SI->getCondition(), 3610 ShrinkOperand(SI->getTrueValue()), 3611 ShrinkOperand(SI->getFalseValue())); 3612 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3613 switch (CI->getOpcode()) { 3614 default: 3615 llvm_unreachable("Unhandled cast!"); 3616 case Instruction::Trunc: 3617 NewI = ShrinkOperand(CI->getOperand(0)); 3618 break; 3619 case Instruction::SExt: 3620 NewI = B.CreateSExtOrTrunc( 3621 CI->getOperand(0), 3622 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3623 break; 3624 case Instruction::ZExt: 3625 NewI = B.CreateZExtOrTrunc( 3626 CI->getOperand(0), 3627 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3628 break; 3629 } 3630 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3631 auto Elements0 = 3632 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3633 auto *O0 = B.CreateZExtOrTrunc( 3634 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3635 auto Elements1 = 3636 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3637 auto *O1 = B.CreateZExtOrTrunc( 3638 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3639 3640 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3641 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3642 // Don't do anything with the operands, just extend the result. 
3643 continue; 3644 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3645 auto Elements = 3646 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3647 auto *O0 = B.CreateZExtOrTrunc( 3648 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3649 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3650 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3651 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3652 auto Elements = 3653 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3654 auto *O0 = B.CreateZExtOrTrunc( 3655 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3656 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3657 } else { 3658 // If we don't know what to do, be conservative and don't do anything. 3659 continue; 3660 } 3661 3662 // Lastly, extend the result. 3663 NewI->takeName(cast<Instruction>(I)); 3664 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3665 I->replaceAllUsesWith(Res); 3666 cast<Instruction>(I)->eraseFromParent(); 3667 Erased.insert(I); 3668 State.reset(Def, Res, Part); 3669 } 3670 } 3671 3672 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3673 for (const auto &KV : Cost->getMinimalBitwidths()) { 3674 // If the value wasn't vectorized, we must maintain the original scalar 3675 // type. The absence of the value from State indicates that it 3676 // wasn't vectorized. 3677 // FIXME: Should not rely on getVPValue at this point. 3678 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3679 if (!State.hasAnyVectorValue(Def)) 3680 continue; 3681 for (unsigned Part = 0; Part < UF; ++Part) { 3682 Value *I = State.get(Def, Part); 3683 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3684 if (Inst && Inst->use_empty()) { 3685 Value *NewI = Inst->getOperand(0); 3686 Inst->eraseFromParent(); 3687 State.reset(Def, NewI, Part); 3688 } 3689 } 3690 } 3691 } 3692 3693 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3694 // Insert truncates and extends for any truncated instructions as hints to 3695 // InstCombine. 3696 if (VF.isVector()) 3697 truncateToMinimalBitwidths(State); 3698 3699 // Fix widened non-induction PHIs by setting up the PHI operands. 3700 if (OrigPHIsToFix.size()) { 3701 assert(EnableVPlanNativePath && 3702 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3703 fixNonInductionPHIs(State); 3704 } 3705 3706 // At this point every instruction in the original loop is widened to a 3707 // vector form. Now we need to fix the recurrences in the loop. These PHI 3708 // nodes are currently empty because we did not want to introduce cycles. 3709 // This is the second stage of vectorizing recurrences. 3710 fixCrossIterationPHIs(State); 3711 3712 // Forget the original basic block. 3713 PSE.getSE()->forgetLoop(OrigLoop); 3714 3715 Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB); 3716 // If we inserted an edge from the middle block to the unique exit block, 3717 // update uses outside the loop (phis) to account for the newly inserted 3718 // edge. 3719 if (!Cost->requiresScalarEpilogue(VF)) { 3720 // Fix-up external users of the induction variables. 
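    // Illustrative example (hypothetical numbers): for an induction with
    // start 0 and step 2 and a vector trip count of 8, an out-of-loop user of
    // the post-increment value sees 16, while a user of the phi itself sees
    // the penultimate value 14 == 0 + 2 * (8 - 1); fixupIVUsers patches the
    // corresponding LCSSA phis with these values on the edge from the middle
    // block.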
    for (auto &Entry : Legal->getInductionVars())
      fixupIVUsers(Entry.first, Entry.second,
                   getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
                   IVEndValues[Entry.first], LoopMiddleBlock,
                   VectorLoop->getHeader());

    fixLCSSAPHIs(State);
  }

  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(VectorLoop->getHeader());

  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of vector code caused by legality checks is ignored, optimistically
  // assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
                               LI->getLoopFor(LoopScalarBody),
                               VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes. At this point every instruction in the
  // original loop is widened to a vector form so we can use them to construct
  // the incoming edges.
  VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(
    VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
3797 // 3798 // vector.ph: 3799 // v_init = vector(..., ..., ..., a[-1]) 3800 // br vector.body 3801 // 3802 // vector.body 3803 // i = phi [0, vector.ph], [i+4, vector.body] 3804 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3805 // v2 = a[i, i+1, i+2, i+3]; 3806 // v3 = vector(v1(3), v2(0, 1, 2)) 3807 // b[i, i+1, i+2, i+3] = v2 - v3 3808 // br cond, vector.body, middle.block 3809 // 3810 // middle.block: 3811 // x = v2(3) 3812 // br scalar.ph 3813 // 3814 // scalar.ph: 3815 // s_init = phi [x, middle.block], [a[-1], otherwise] 3816 // br scalar.body 3817 // 3818 // After execution completes the vector loop, we extract the next value of 3819 // the recurrence (x) to use as the initial value in the scalar loop. 3820 3821 // Extract the last vector element in the middle block. This will be the 3822 // initial value for the recurrence when jumping to the scalar loop. 3823 VPValue *PreviousDef = PhiR->getBackedgeValue(); 3824 Value *Incoming = State.get(PreviousDef, UF - 1); 3825 auto *ExtractForScalar = Incoming; 3826 auto *IdxTy = Builder.getInt32Ty(); 3827 if (VF.isVector()) { 3828 auto *One = ConstantInt::get(IdxTy, 1); 3829 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3830 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3831 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 3832 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 3833 "vector.recur.extract"); 3834 } 3835 // Extract the second last element in the middle block if the 3836 // Phi is used outside the loop. We need to extract the phi itself 3837 // and not the last element (the phi update in the current iteration). This 3838 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3839 // when the scalar loop is not run at all. 3840 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3841 if (VF.isVector()) { 3842 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3843 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 3844 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3845 Incoming, Idx, "vector.recur.extract.for.phi"); 3846 } else if (UF > 1) 3847 // When loop is unrolled without vectorizing, initialize 3848 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 3849 // of `Incoming`. This is analogous to the vectorized case above: extracting 3850 // the second last element when VF > 1. 3851 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 3852 3853 // Fix the initial value of the original recurrence in the scalar loop. 3854 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3855 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 3856 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3857 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 3858 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3859 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3860 Start->addIncoming(Incoming, BB); 3861 } 3862 3863 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 3864 Phi->setName("scalar.recur"); 3865 3866 // Finally, fix users of the recurrence outside the loop. The users will need 3867 // either the last value of the scalar recurrence or the last value of the 3868 // vector recurrence we extracted in the middle block. Since the loop is in 3869 // LCSSA form, we just need to find all the phi nodes for the original scalar 3870 // recurrence in the exit block, and then add an edge for the middle block. 
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from the middle block to
  // the exit block and thus no phis need to be updated.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(LoopExitInst);

  Type *PhiTy = OrigPhi->getType();
  BasicBlock *VectorLoopLatch =
      LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
  // If tail is folded by masking, the vector value to leave the loop should be
  // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
  // instead of the former. For an inloop reduction the reduction will already
  // be predicated, and does not need to be handled here.
  if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at no
      // extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
3935 if (PreferPredicatedReductionSelect || 3936 TTI->preferPredicatedReductionSelect( 3937 RdxDesc.getOpcode(), PhiTy, 3938 TargetTransformInfo::ReductionFlags())) { 3939 auto *VecRdxPhi = 3940 cast<PHINode>(State.get(PhiR, Part)); 3941 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3942 } 3943 } 3944 } 3945 3946 // If the vector reduction can be performed in a smaller type, we truncate 3947 // then extend the loop exit value to enable InstCombine to evaluate the 3948 // entire expression in the smaller type. 3949 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3950 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3951 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3952 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3953 VectorParts RdxParts(UF); 3954 for (unsigned Part = 0; Part < UF; ++Part) { 3955 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3956 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3957 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3958 : Builder.CreateZExt(Trunc, VecTy); 3959 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3960 if (U != Trunc) { 3961 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3962 RdxParts[Part] = Extnd; 3963 } 3964 } 3965 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3966 for (unsigned Part = 0; Part < UF; ++Part) { 3967 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3968 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3969 } 3970 } 3971 3972 // Reduce all of the unrolled parts into a single vector. 3973 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3974 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3975 3976 // The middle block terminator has already been assigned a DebugLoc here (the 3977 // OrigLoop's single latch terminator). We want the whole middle block to 3978 // appear to execute on this line because: (a) it is all compiler generated, 3979 // (b) these instructions are always executed after evaluating the latch 3980 // conditional branch, and (c) other passes may add new predecessors which 3981 // terminate on this line. This is the easiest way to ensure we don't 3982 // accidentally cause an extra step back into the loop while debugging. 3983 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 3984 if (PhiR->isOrdered()) 3985 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 3986 else { 3987 // Floating-point operations should have some FMF to enable the reduction. 3988 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 3989 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 3990 for (unsigned Part = 1; Part < UF; ++Part) { 3991 Value *RdxPart = State.get(LoopExitInstDef, Part); 3992 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 3993 ReducedPartRdx = Builder.CreateBinOp( 3994 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 3995 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 3996 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 3997 ReducedPartRdx, RdxPart); 3998 else 3999 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4000 } 4001 } 4002 4003 // Create the reduction after the loop. Note that inloop reductions create the 4004 // target reduction in the loop using a Reduction recipe. 
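  // Illustrative note (added commentary; the exact lowering is
  // target-dependent): for a plain integer add reduction the call below
  // typically reduces the already-combined parts with a reduction intrinsic,
  // e.g.
  //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
  // and the result is widened back to the phi type afterwards if the
  // reduction was narrowed.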
  if (VF.isVector() && !PhiR->isInLoop()) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  PHINode *ResumePhi =
      dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());

  // If we are fixing reductions in the epilogue loop then we should already
  // have created a bc.merge.rdx Phi after the main vector body. Ensure that
  // we carry over the incoming values correctly.
  for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
    if (Incoming == LoopMiddleBlock)
      BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
    else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
      BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
                              Incoming);
    else
      BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
  }

  // Set the resume value for this reduction.
  ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
        LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RK != RecurKind::Add && RK != RecurKind::Mul)
    return;

  Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
  assert(LoopExitInstr && "null loop exit instruction");
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(LoopExitInstr);
  Visited.insert(LoopExitInstr);

  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();
    if (isa<OverflowingBinaryOperator>(Cur))
      for (unsigned Part = 0; Part < UF; ++Part) {
        // FIXME: Should not rely on getVPValue at this point.
4080 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4081 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4082 } 4083 4084 for (User *U : Cur->users()) { 4085 Instruction *UI = cast<Instruction>(U); 4086 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4087 Visited.insert(UI).second) 4088 Worklist.push_back(UI); 4089 } 4090 } 4091 } 4092 4093 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4094 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4095 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4096 // Some phis were already hand updated by the reduction and recurrence 4097 // code above, leave them alone. 4098 continue; 4099 4100 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4101 // Non-instruction incoming values will have only one value. 4102 4103 VPLane Lane = VPLane::getFirstLane(); 4104 if (isa<Instruction>(IncomingValue) && 4105 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4106 VF)) 4107 Lane = VPLane::getLastLaneForVF(VF); 4108 4109 // Can be a loop invariant incoming value or the last scalar value to be 4110 // extracted from the vectorized loop. 4111 // FIXME: Should not rely on getVPValue at this point. 4112 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4113 Value *lastIncomingValue = 4114 OrigLoop->isLoopInvariant(IncomingValue) 4115 ? IncomingValue 4116 : State.get(State.Plan->getVPValue(IncomingValue, true), 4117 VPIteration(UF - 1, Lane)); 4118 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4119 } 4120 } 4121 4122 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4123 // The basic block and loop containing the predicated instruction. 4124 auto *PredBB = PredInst->getParent(); 4125 auto *VectorLoop = LI->getLoopFor(PredBB); 4126 4127 // Initialize a worklist with the operands of the predicated instruction. 4128 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4129 4130 // Holds instructions that we need to analyze again. An instruction may be 4131 // reanalyzed if we don't yet know if we can sink it or not. 4132 SmallVector<Instruction *, 8> InstsToReanalyze; 4133 4134 // Returns true if a given use occurs in the predicated block. Phi nodes use 4135 // their operands in their corresponding predecessor blocks. 4136 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4137 auto *I = cast<Instruction>(U.getUser()); 4138 BasicBlock *BB = I->getParent(); 4139 if (auto *Phi = dyn_cast<PHINode>(I)) 4140 BB = Phi->getIncomingBlock( 4141 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4142 return BB == PredBB; 4143 }; 4144 4145 // Iteratively sink the scalarized operands of the predicated instruction 4146 // into the block we created for it. When an instruction is sunk, its 4147 // operands are then added to the worklist. The algorithm ends when a full 4148 // pass through the worklist does not sink a single instruction. 4149 bool Changed; 4150 do { 4151 // Add the instructions that need to be reanalyzed to the worklist, and 4152 // reset the changed indicator. 4153 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4154 InstsToReanalyze.clear(); 4155 Changed = false; 4156 4157 while (!Worklist.empty()) { 4158 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4159 4160 // We can't sink an instruction if it is a phi node, is not in the loop, 4161 // or may have side effects.
4162 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4163 I->mayHaveSideEffects()) 4164 continue; 4165 4166 // If the instruction is already in PredBB, check if we can sink its 4167 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4168 // sinking the scalar instruction I, hence it appears in PredBB; but it 4169 // may have failed to sink I's operands (recursively), which we try 4170 // (again) here. 4171 if (I->getParent() == PredBB) { 4172 Worklist.insert(I->op_begin(), I->op_end()); 4173 continue; 4174 } 4175 4176 // It's legal to sink the instruction if all its uses occur in the 4177 // predicated block. Otherwise, there's nothing to do yet, and we may 4178 // need to reanalyze the instruction. 4179 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4180 InstsToReanalyze.push_back(I); 4181 continue; 4182 } 4183 4184 // Move the instruction to the beginning of the predicated block, and add 4185 // its operands to the worklist. 4186 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4187 Worklist.insert(I->op_begin(), I->op_end()); 4188 4189 // The sinking may have enabled other instructions to be sunk, so we will 4190 // need to iterate. 4191 Changed = true; 4192 } 4193 } while (Changed); 4194 } 4195 4196 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4197 for (PHINode *OrigPhi : OrigPHIsToFix) { 4198 VPWidenPHIRecipe *VPPhi = 4199 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4200 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4201 // Make sure the builder has a valid insert point. 4202 Builder.SetInsertPoint(NewPhi); 4203 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4204 VPValue *Inc = VPPhi->getIncomingValue(i); 4205 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4206 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4207 } 4208 } 4209 } 4210 4211 bool InnerLoopVectorizer::useOrderedReductions( 4212 const RecurrenceDescriptor &RdxDesc) { 4213 return Cost->useOrderedReductions(RdxDesc); 4214 } 4215 4216 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4217 VPWidenPHIRecipe *PhiR, 4218 VPTransformState &State) { 4219 PHINode *P = cast<PHINode>(PN); 4220 if (EnableVPlanNativePath) { 4221 // Currently we enter here in the VPlan-native path for non-induction 4222 // PHIs where all control flow is uniform. We simply widen these PHIs. 4223 // Create a vector phi with no operands - the vector phi operands will be 4224 // set at the end of vector code generation. 4225 Type *VecTy = (State.VF.isScalar()) 4226 ? PN->getType() 4227 : VectorType::get(PN->getType(), State.VF); 4228 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4229 State.set(PhiR, VecPhi, 0); 4230 OrigPHIsToFix.push_back(P); 4231 4232 return; 4233 } 4234 4235 assert(PN->getParent() == OrigLoop->getHeader() && 4236 "Non-header phis should have been handled elsewhere"); 4237 4238 // In order to support recurrences we need to be able to vectorize Phi nodes. 4239 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4240 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4241 // this value when we vectorize all of the instructions that use the PHI. 4242 4243 assert(!Legal->isReductionVariable(P) && 4244 "reductions should be handled elsewhere"); 4245 4246 setDebugLocFromInst(P); 4247 4248 // This PHINode must be an induction variable. 4249 // Make sure that we know about it.
4250 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4251 4252 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4253 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4254 4255 auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV(); 4256 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 4257 4258 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4259 // which can be found from the original scalar operations. 4260 switch (II.getKind()) { 4261 case InductionDescriptor::IK_NoInduction: 4262 llvm_unreachable("Unknown induction"); 4263 case InductionDescriptor::IK_IntInduction: 4264 case InductionDescriptor::IK_FpInduction: 4265 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4266 case InductionDescriptor::IK_PtrInduction: { 4267 // Handle the pointer induction variable case. 4268 assert(P->getType()->isPointerTy() && "Unexpected type."); 4269 4270 if (all_of(PhiR->users(), [PhiR](const VPUser *U) { 4271 return cast<VPRecipeBase>(U)->usesScalars(PhiR); 4272 })) { 4273 // This is the normalized GEP that starts counting at zero. 4274 Value *PtrInd = 4275 Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType()); 4276 // Determine the number of scalars we need to generate for each unroll 4277 // iteration. If the instruction is uniform, we only need to generate the 4278 // first lane. Otherwise, we generate all VF values. 4279 bool IsUniform = vputils::onlyFirstLaneUsed(PhiR); 4280 assert((IsUniform || !State.VF.isScalable()) && 4281 "Cannot scalarize a scalable VF"); 4282 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 4283 4284 for (unsigned Part = 0; Part < UF; ++Part) { 4285 Value *PartStart = 4286 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4287 4288 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4289 Value *Idx = Builder.CreateAdd( 4290 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4291 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4292 4293 Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(), 4294 State.CFG.PrevBB->getTerminator()); 4295 Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, 4296 II.getStartValue(), Step, II); 4297 SclrGep->setName("next.gep"); 4298 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4299 } 4300 } 4301 return; 4302 } 4303 assert(isa<SCEVConstant>(II.getStep()) && 4304 "Induction step not a SCEV constant!"); 4305 Type *PhiType = II.getStep()->getType(); 4306 4307 // Build a pointer phi 4308 Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue(); 4309 Type *ScStValueType = ScalarStartValue->getType(); 4310 PHINode *NewPointerPhi = 4311 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 4312 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4313 4314 // A pointer induction, performed by using a gep 4315 BasicBlock *LoopLatch = LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 4316 Instruction *InductionLoc = LoopLatch->getTerminator(); 4317 const SCEV *ScalarStep = II.getStep(); 4318 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4319 Value *ScalarStepValue = 4320 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4321 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4322 Value *NumUnrolledElems = 4323 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4324 Value *InductionGEP = GetElementPtrInst::Create( 4325 II.getElementType(), NewPointerPhi, 4326 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4327 
InductionLoc); 4328 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4329 4330 // Create UF many actual address geps that use the pointer 4331 // phi as base and a vectorized version of the step value 4332 // (<step*0, ..., step*N>) as offset. 4333 for (unsigned Part = 0; Part < State.UF; ++Part) { 4334 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4335 Value *StartOffsetScalar = 4336 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4337 Value *StartOffset = 4338 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4339 // Create a vector of consecutive numbers from zero to VF. 4340 StartOffset = 4341 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4342 4343 Value *GEP = Builder.CreateGEP( 4344 II.getElementType(), NewPointerPhi, 4345 Builder.CreateMul( 4346 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4347 "vector.gep")); 4348 State.set(PhiR, GEP, Part); 4349 } 4350 } 4351 } 4352 } 4353 4354 /// A helper function for checking whether an integer division-related 4355 /// instruction may divide by zero (in which case it must be predicated if 4356 /// executed conditionally in the scalar code). 4357 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4358 /// Non-zero divisors that are non compile-time constants will not be 4359 /// converted into multiplication, so we will still end up scalarizing 4360 /// the division, but can do so w/o predication. 4361 static bool mayDivideByZero(Instruction &I) { 4362 assert((I.getOpcode() == Instruction::UDiv || 4363 I.getOpcode() == Instruction::SDiv || 4364 I.getOpcode() == Instruction::URem || 4365 I.getOpcode() == Instruction::SRem) && 4366 "Unexpected instruction"); 4367 Value *Divisor = I.getOperand(1); 4368 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4369 return !CInt || CInt->isZero(); 4370 } 4371 4372 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4373 VPUser &ArgOperands, 4374 VPTransformState &State) { 4375 assert(!isa<DbgInfoIntrinsic>(I) && 4376 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4377 setDebugLocFromInst(&I); 4378 4379 Module *M = I.getParent()->getParent()->getParent(); 4380 auto *CI = cast<CallInst>(&I); 4381 4382 SmallVector<Type *, 4> Tys; 4383 for (Value *ArgOperand : CI->args()) 4384 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4385 4386 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4387 4388 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4389 // version of the instruction. 4390 // Is it beneficial to perform intrinsic call compared to lib call? 4391 bool NeedToScalarize = false; 4392 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4393 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4394 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4395 assert((UseVectorIntrinsic || !NeedToScalarize) && 4396 "Instruction should be scalarized elsewhere."); 4397 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4398 "Either the intrinsic cost or vector call cost must be valid"); 4399 4400 for (unsigned Part = 0; Part < UF; ++Part) { 4401 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4402 SmallVector<Value *, 4> Args; 4403 for (auto &I : enumerate(ArgOperands.operands())) { 4404 // Some intrinsics have a scalar argument - don't replace it with a 4405 // vector. 
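// (Illustrative example, not from the original comment: the i32 exponent operand of llvm.powi is such a scalar argument and must remain scalar even when the call itself is widened.)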
4406 Value *Arg; 4407 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4408 Arg = State.get(I.value(), Part); 4409 else { 4410 Arg = State.get(I.value(), VPIteration(0, 0)); 4411 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4412 TysForDecl.push_back(Arg->getType()); 4413 } 4414 Args.push_back(Arg); 4415 } 4416 4417 Function *VectorF; 4418 if (UseVectorIntrinsic) { 4419 // Use vector version of the intrinsic. 4420 if (VF.isVector()) 4421 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4422 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4423 assert(VectorF && "Can't retrieve vector intrinsic."); 4424 } else { 4425 // Use vector version of the function call. 4426 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4427 #ifndef NDEBUG 4428 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4429 "Can't create vector function."); 4430 #endif 4431 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4432 } 4433 SmallVector<OperandBundleDef, 1> OpBundles; 4434 CI->getOperandBundlesAsDefs(OpBundles); 4435 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4436 4437 if (isa<FPMathOperator>(V)) 4438 V->copyFastMathFlags(CI); 4439 4440 State.set(Def, V, Part); 4441 addMetadata(V, &I); 4442 } 4443 } 4444 4445 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4446 // We should not collect Scalars more than once per VF. Right now, this 4447 // function is called from collectUniformsAndScalars(), which already does 4448 // this check. Collecting Scalars for VF=1 does not make any sense. 4449 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4450 "This function should not be visited twice for the same VF"); 4451 4452 // This avoids any chances of creating a REPLICATE recipe during planning 4453 // since that would result in generation of scalarized code during execution, 4454 // which is not supported for scalable vectors. 4455 if (VF.isScalable()) { 4456 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4457 return; 4458 } 4459 4460 SmallSetVector<Instruction *, 8> Worklist; 4461 4462 // These sets are used to seed the analysis with pointers used by memory 4463 // accesses that will remain scalar. 4464 SmallSetVector<Instruction *, 8> ScalarPtrs; 4465 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4466 auto *Latch = TheLoop->getLoopLatch(); 4467 4468 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4469 // The pointer operands of loads and stores will be scalar as long as the 4470 // memory access is not a gather or scatter operation. The value operand of a 4471 // store will remain scalar if the store is scalarized. 4472 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4473 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4474 assert(WideningDecision != CM_Unknown && 4475 "Widening decision should be ready at this moment"); 4476 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4477 if (Ptr == Store->getValueOperand()) 4478 return WideningDecision == CM_Scalarize; 4479 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4480 "Ptr is neither a value or pointer operand"); 4481 return WideningDecision != CM_GatherScatter; 4482 }; 4483 4484 // A helper that returns true if the given value is a bitcast or 4485 // getelementptr instruction contained in the loop. 
4486 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4487 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4488 isa<GetElementPtrInst>(V)) && 4489 !TheLoop->isLoopInvariant(V); 4490 }; 4491 4492 // A helper that evaluates a memory access's use of a pointer. If the use will 4493 // be a scalar use and the pointer is only used by memory accesses, we place 4494 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4495 // PossibleNonScalarPtrs. 4496 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4497 // We only care about bitcast and getelementptr instructions contained in 4498 // the loop. 4499 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4500 return; 4501 4502 // If the pointer has already been identified as scalar (e.g., if it was 4503 // also identified as uniform), there's nothing to do. 4504 auto *I = cast<Instruction>(Ptr); 4505 if (Worklist.count(I)) 4506 return; 4507 4508 // If the use of the pointer will be a scalar use, and all users of the 4509 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4510 // place the pointer in PossibleNonScalarPtrs. 4511 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4512 return isa<LoadInst>(U) || isa<StoreInst>(U); 4513 })) 4514 ScalarPtrs.insert(I); 4515 else 4516 PossibleNonScalarPtrs.insert(I); 4517 }; 4518 4519 // We seed the scalars analysis with two classes of instructions: (1) 4520 // instructions marked uniform-after-vectorization and (2) bitcast, 4521 // getelementptr and (pointer) phi instructions used by memory accesses 4522 // requiring a scalar use. 4523 // 4524 // (1) Add to the worklist all instructions that have been identified as 4525 // uniform-after-vectorization. 4526 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4527 4528 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4529 // memory accesses requiring a scalar use. The pointer operands of loads and 4530 // stores will be scalar as long as the memory access is not a gather or 4531 // scatter operation. The value operand of a store will remain scalar if the 4532 // store is scalarized. 4533 for (auto *BB : TheLoop->blocks()) 4534 for (auto &I : *BB) { 4535 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4536 evaluatePtrUse(Load, Load->getPointerOperand()); 4537 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4538 evaluatePtrUse(Store, Store->getPointerOperand()); 4539 evaluatePtrUse(Store, Store->getValueOperand()); 4540 } 4541 } 4542 for (auto *I : ScalarPtrs) 4543 if (!PossibleNonScalarPtrs.count(I)) { 4544 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4545 Worklist.insert(I); 4546 } 4547 4548 // Insert the forced scalars. 4549 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4550 // induction variable when the PHI user is scalarized. 4551 auto ForcedScalar = ForcedScalars.find(VF); 4552 if (ForcedScalar != ForcedScalars.end()) 4553 for (auto *I : ForcedScalar->second) 4554 Worklist.insert(I); 4555 4556 // Expand the worklist by looking through any bitcasts and getelementptr 4557 // instructions we've already identified as scalar. This is similar to the 4558 // expansion step in collectLoopUniforms(); however, here we're only 4559 // expanding to include additional bitcasts and getelementptr instructions.
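// Illustrative sketch (hypothetical IR, for exposition only): if %q = bitcast %p is already in the worklist and %p is a loop-varying GEP whose in-loop users are all scalarized loads/stores or other worklist members, the loop below pulls %p in as well.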
4560 unsigned Idx = 0; 4561 while (Idx != Worklist.size()) { 4562 Instruction *Dst = Worklist[Idx++]; 4563 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4564 continue; 4565 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4566 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4567 auto *J = cast<Instruction>(U); 4568 return !TheLoop->contains(J) || Worklist.count(J) || 4569 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4570 isScalarUse(J, Src)); 4571 })) { 4572 Worklist.insert(Src); 4573 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4574 } 4575 } 4576 4577 // An induction variable will remain scalar if all users of the induction 4578 // variable and induction variable update remain scalar. 4579 for (auto &Induction : Legal->getInductionVars()) { 4580 auto *Ind = Induction.first; 4581 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4582 4583 // If tail-folding is applied, the primary induction variable will be used 4584 // to feed a vector compare. 4585 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4586 continue; 4587 4588 // Returns true if \p Indvar is a pointer induction that is used directly by 4589 // load/store instruction \p I. 4590 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4591 Instruction *I) { 4592 return Induction.second.getKind() == 4593 InductionDescriptor::IK_PtrInduction && 4594 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4595 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4596 }; 4597 4598 // Determine if all users of the induction variable are scalar after 4599 // vectorization. 4600 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4601 auto *I = cast<Instruction>(U); 4602 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4603 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4604 }); 4605 if (!ScalarInd) 4606 continue; 4607 4608 // Determine if all users of the induction variable update instruction are 4609 // scalar after vectorization. 4610 auto ScalarIndUpdate = 4611 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4612 auto *I = cast<Instruction>(U); 4613 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4614 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4615 }); 4616 if (!ScalarIndUpdate) 4617 continue; 4618 4619 // The induction variable and its update instruction will remain scalar. 4620 Worklist.insert(Ind); 4621 Worklist.insert(IndUpdate); 4622 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4623 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4624 << "\n"); 4625 } 4626 4627 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4628 } 4629 4630 bool LoopVectorizationCostModel::isScalarWithPredication( 4631 Instruction *I, ElementCount VF) const { 4632 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4633 return false; 4634 switch(I->getOpcode()) { 4635 default: 4636 break; 4637 case Instruction::Load: 4638 case Instruction::Store: { 4639 if (!Legal->isMaskRequired(I)) 4640 return false; 4641 auto *Ptr = getLoadStorePointerOperand(I); 4642 auto *Ty = getLoadStoreType(I); 4643 Type *VTy = Ty; 4644 if (VF.isVector()) 4645 VTy = VectorType::get(Ty, VF); 4646 const Align Alignment = getLoadStoreAlignment(I); 4647 return isa<LoadInst>(I) ? 
!(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4648 TTI.isLegalMaskedGather(VTy, Alignment)) 4649 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4650 TTI.isLegalMaskedScatter(VTy, Alignment)); 4651 } 4652 case Instruction::UDiv: 4653 case Instruction::SDiv: 4654 case Instruction::SRem: 4655 case Instruction::URem: 4656 return mayDivideByZero(*I); 4657 } 4658 return false; 4659 } 4660 4661 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4662 Instruction *I, ElementCount VF) { 4663 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4664 assert(getWideningDecision(I, VF) == CM_Unknown && 4665 "Decision should not be set yet."); 4666 auto *Group = getInterleavedAccessGroup(I); 4667 assert(Group && "Must have a group."); 4668 4669 // If the instruction's allocated size doesn't equal its type size, it 4670 // requires padding and will be scalarized. 4671 auto &DL = I->getModule()->getDataLayout(); 4672 auto *ScalarTy = getLoadStoreType(I); 4673 if (hasIrregularType(ScalarTy, DL)) 4674 return false; 4675 4676 // Check if masking is required. 4677 // A Group may need masking for one of two reasons: it resides in a block that 4678 // needs predication, or it was decided to use masking to deal with gaps 4679 // (either a gap at the end of a load-access that may result in a speculative 4680 // load, or any gaps in a store-access). 4681 bool PredicatedAccessRequiresMasking = 4682 blockNeedsPredicationForAnyReason(I->getParent()) && 4683 Legal->isMaskRequired(I); 4684 bool LoadAccessWithGapsRequiresEpilogMasking = 4685 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4686 !isScalarEpilogueAllowed(); 4687 bool StoreAccessWithGapsRequiresMasking = 4688 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4689 if (!PredicatedAccessRequiresMasking && 4690 !LoadAccessWithGapsRequiresEpilogMasking && 4691 !StoreAccessWithGapsRequiresMasking) 4692 return true; 4693 4694 // If masked interleaving is required, we expect that the user/target had 4695 // enabled it, because otherwise it either wouldn't have been created or 4696 // it should have been invalidated by the CostModel. 4697 assert(useMaskedInterleavedAccesses(TTI) && 4698 "Masked interleave-groups for predicated accesses are not enabled."); 4699 4700 if (Group->isReverse()) 4701 return false; 4702 4703 auto *Ty = getLoadStoreType(I); 4704 const Align Alignment = getLoadStoreAlignment(I); 4705 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4706 : TTI.isLegalMaskedStore(Ty, Alignment); 4707 } 4708 4709 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4710 Instruction *I, ElementCount VF) { 4711 // Get and ensure we have a valid memory instruction. 4712 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4713 4714 auto *Ptr = getLoadStorePointerOperand(I); 4715 auto *ScalarTy = getLoadStoreType(I); 4716 4717 // First of all, in order to be widened, the pointer must be consecutive. 4718 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4719 return false; 4720 4721 // If the instruction is a store located in a predicated block, it will be 4722 // scalarized. 4723 if (isScalarWithPredication(I, VF)) 4724 return false; 4725 4726 // If the instruction's allocated size doesn't equal its type size, it 4727 // requires padding and will be scalarized.
4728 auto &DL = I->getModule()->getDataLayout(); 4729 if (hasIrregularType(ScalarTy, DL)) 4730 return false; 4731 4732 return true; 4733 } 4734 4735 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4736 // We should not collect Uniforms more than once per VF. Right now, 4737 // this function is called from collectUniformsAndScalars(), which 4738 // already does this check. Collecting Uniforms for VF=1 does not make any 4739 // sense. 4740 4741 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4742 "This function should not be visited twice for the same VF"); 4743 4744 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4745 // not analyze again. Uniforms.count(VF) will return 1. 4746 Uniforms[VF].clear(); 4747 4748 // We now know that the loop is vectorizable! 4749 // Collect instructions inside the loop that will remain uniform after 4750 // vectorization. 4751 4752 // Global values, params and instructions outside of current loop are out of 4753 // scope. 4754 auto isOutOfScope = [&](Value *V) -> bool { 4755 Instruction *I = dyn_cast<Instruction>(V); 4756 return (!I || !TheLoop->contains(I)); 4757 }; 4758 4759 // Worklist containing uniform instructions demanding lane 0. 4760 SetVector<Instruction *> Worklist; 4761 BasicBlock *Latch = TheLoop->getLoopLatch(); 4762 4763 // Add uniform instructions demanding lane 0 to the worklist. Instructions 4764 // that are scalar with predication must not be considered uniform after 4765 // vectorization, because that would create an erroneous replicating region 4766 // where only a single instance out of VF should be formed. 4767 // TODO: optimize such seldom cases if found important, see PR40816. 4768 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4769 if (isOutOfScope(I)) { 4770 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4771 << *I << "\n"); 4772 return; 4773 } 4774 if (isScalarWithPredication(I, VF)) { 4775 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4776 << *I << "\n"); 4777 return; 4778 } 4779 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4780 Worklist.insert(I); 4781 }; 4782 4783 // Start with the conditional branch. If the branch condition is an 4784 // instruction contained in the loop that is only used by the branch, it is 4785 // uniform. 4786 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4787 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4788 addToWorklistIfAllowed(Cmp); 4789 4790 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4791 InstWidening WideningDecision = getWideningDecision(I, VF); 4792 assert(WideningDecision != CM_Unknown && 4793 "Widening decision should be ready at this moment"); 4794 4795 // A uniform memory op is itself uniform. We exclude uniform stores 4796 // here as they demand the last lane, not the first one. 4797 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4798 assert(WideningDecision == CM_Scalarize); 4799 return true; 4800 } 4801 4802 return (WideningDecision == CM_Widen || 4803 WideningDecision == CM_Widen_Reverse || 4804 WideningDecision == CM_Interleave); 4805 }; 4806 4807 4808 // Returns true if Ptr is the pointer operand of a memory access instruction 4809 // I, and I is known to not require scalarization. 
4810 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4811 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4812 }; 4813 4814 // Holds a list of values which are known to have at least one uniform use. 4815 // Note that there may be other uses which aren't uniform. A "uniform use" 4816 // here is something which only demands lane 0 of the unrolled iterations; 4817 // it does not imply that all lanes produce the same value (e.g. this is not 4818 // the usual meaning of uniform) 4819 SetVector<Value *> HasUniformUse; 4820 4821 // Scan the loop for instructions which are either a) known to have only 4822 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4823 for (auto *BB : TheLoop->blocks()) 4824 for (auto &I : *BB) { 4825 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4826 switch (II->getIntrinsicID()) { 4827 case Intrinsic::sideeffect: 4828 case Intrinsic::experimental_noalias_scope_decl: 4829 case Intrinsic::assume: 4830 case Intrinsic::lifetime_start: 4831 case Intrinsic::lifetime_end: 4832 if (TheLoop->hasLoopInvariantOperands(&I)) 4833 addToWorklistIfAllowed(&I); 4834 break; 4835 default: 4836 break; 4837 } 4838 } 4839 4840 // ExtractValue instructions must be uniform, because the operands are 4841 // known to be loop-invariant. 4842 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4843 assert(isOutOfScope(EVI->getAggregateOperand()) && 4844 "Expected aggregate value to be loop invariant"); 4845 addToWorklistIfAllowed(EVI); 4846 continue; 4847 } 4848 4849 // If there's no pointer operand, there's nothing to do. 4850 auto *Ptr = getLoadStorePointerOperand(&I); 4851 if (!Ptr) 4852 continue; 4853 4854 // A uniform memory op is itself uniform. We exclude uniform stores 4855 // here as they demand the last lane, not the first one. 4856 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4857 addToWorklistIfAllowed(&I); 4858 4859 if (isUniformDecision(&I, VF)) { 4860 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4861 HasUniformUse.insert(Ptr); 4862 } 4863 } 4864 4865 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4866 // demanding) users. Since loops are assumed to be in LCSSA form, this 4867 // disallows uses outside the loop as well. 4868 for (auto *V : HasUniformUse) { 4869 if (isOutOfScope(V)) 4870 continue; 4871 auto *I = cast<Instruction>(V); 4872 auto UsersAreMemAccesses = 4873 llvm::all_of(I->users(), [&](User *U) -> bool { 4874 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4875 }); 4876 if (UsersAreMemAccesses) 4877 addToWorklistIfAllowed(I); 4878 } 4879 4880 // Expand Worklist in topological order: whenever a new instruction 4881 // is added , its users should be already inside Worklist. It ensures 4882 // a uniform instruction will only be used by uniform instructions. 4883 unsigned idx = 0; 4884 while (idx != Worklist.size()) { 4885 Instruction *I = Worklist[idx++]; 4886 4887 for (auto OV : I->operand_values()) { 4888 // isOutOfScope operands cannot be uniform instructions. 4889 if (isOutOfScope(OV)) 4890 continue; 4891 // First order recurrence Phi's should typically be considered 4892 // non-uniform. 4893 auto *OP = dyn_cast<PHINode>(OV); 4894 if (OP && Legal->isFirstOrderRecurrence(OP)) 4895 continue; 4896 // If all the users of the operand are uniform, then add the 4897 // operand into the uniform worklist. 
4898 auto *OI = cast<Instruction>(OV); 4899 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4900 auto *J = cast<Instruction>(U); 4901 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4902 })) 4903 addToWorklistIfAllowed(OI); 4904 } 4905 } 4906 4907 // For an instruction to be added into Worklist above, all its users inside 4908 // the loop should also be in Worklist. However, this condition cannot be 4909 // true for phi nodes that form a cyclic dependence. We must process phi 4910 // nodes separately. An induction variable will remain uniform if all users 4911 // of the induction variable and induction variable update remain uniform. 4912 // The code below handles both pointer and non-pointer induction variables. 4913 for (auto &Induction : Legal->getInductionVars()) { 4914 auto *Ind = Induction.first; 4915 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4916 4917 // Determine if all users of the induction variable are uniform after 4918 // vectorization. 4919 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4920 auto *I = cast<Instruction>(U); 4921 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4922 isVectorizedMemAccessUse(I, Ind); 4923 }); 4924 if (!UniformInd) 4925 continue; 4926 4927 // Determine if all users of the induction variable update instruction are 4928 // uniform after vectorization. 4929 auto UniformIndUpdate = 4930 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4931 auto *I = cast<Instruction>(U); 4932 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4933 isVectorizedMemAccessUse(I, IndUpdate); 4934 }); 4935 if (!UniformIndUpdate) 4936 continue; 4937 4938 // The induction variable and its update instruction will remain uniform. 4939 addToWorklistIfAllowed(Ind); 4940 addToWorklistIfAllowed(IndUpdate); 4941 } 4942 4943 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4944 } 4945 4946 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4947 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4948 4949 if (Legal->getRuntimePointerChecking()->Need) { 4950 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4951 "runtime pointer checks needed. Enable vectorization of this " 4952 "loop with '#pragma clang loop vectorize(enable)' when " 4953 "compiling with -Os/-Oz", 4954 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4955 return true; 4956 } 4957 4958 if (!PSE.getPredicate().isAlwaysTrue()) { 4959 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4960 "runtime SCEV checks needed. Enable vectorization of this " 4961 "loop with '#pragma clang loop vectorize(enable)' when " 4962 "compiling with -Os/-Oz", 4963 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4964 return true; 4965 } 4966 4967 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4968 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4969 reportVectorizationFailure("Runtime stride check for small trip count", 4970 "runtime stride == 1 checks needed. 
Enable vectorization of " 4971 "this loop without such check by compiling with -Os/-Oz", 4972 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4973 return true; 4974 } 4975 4976 return false; 4977 } 4978 4979 ElementCount 4980 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4981 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4982 return ElementCount::getScalable(0); 4983 4984 if (Hints->isScalableVectorizationDisabled()) { 4985 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4986 "ScalableVectorizationDisabled", ORE, TheLoop); 4987 return ElementCount::getScalable(0); 4988 } 4989 4990 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4991 4992 auto MaxScalableVF = ElementCount::getScalable( 4993 std::numeric_limits<ElementCount::ScalarTy>::max()); 4994 4995 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4996 // FIXME: While for scalable vectors this is currently sufficient, this should 4997 // be replaced by a more detailed mechanism that filters out specific VFs, 4998 // instead of invalidating vectorization for a whole set of VFs based on the 4999 // MaxVF. 5000 5001 // Disable scalable vectorization if the loop contains unsupported reductions. 5002 if (!canVectorizeReductions(MaxScalableVF)) { 5003 reportVectorizationInfo( 5004 "Scalable vectorization not supported for the reduction " 5005 "operations found in this loop.", 5006 "ScalableVFUnfeasible", ORE, TheLoop); 5007 return ElementCount::getScalable(0); 5008 } 5009 5010 // Disable scalable vectorization if the loop contains any instructions 5011 // with element types not supported for scalable vectors. 5012 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5013 return !Ty->isVoidTy() && 5014 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5015 })) { 5016 reportVectorizationInfo("Scalable vectorization is not supported " 5017 "for all element types found in this loop.", 5018 "ScalableVFUnfeasible", ORE, TheLoop); 5019 return ElementCount::getScalable(0); 5020 } 5021 5022 if (Legal->isSafeForAnyVectorWidth()) 5023 return MaxScalableVF; 5024 5025 // Limit MaxScalableVF by the maximum safe dependence distance. 5026 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5027 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 5028 MaxVScale = 5029 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 5030 MaxScalableVF = ElementCount::getScalable( 5031 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5032 if (!MaxScalableVF) 5033 reportVectorizationInfo( 5034 "Max legal vector width too small, scalable vectorization " 5035 "unfeasible.", 5036 "ScalableVFUnfeasible", ORE, TheLoop); 5037 5038 return MaxScalableVF; 5039 } 5040 5041 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 5042 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 5043 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5044 unsigned SmallestType, WidestType; 5045 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5046 5047 // Get the maximum safe dependence distance in bits computed by LAA. 5048 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5049 // the memory access that is most restrictive (involved in the smallest 5050 // dependence distance).
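// Worked example (hypothetical numbers, for exposition only): a maximum safe dependence distance of 512 bits with a widest loop type of i32 gives MaxSafeElements = PowerOf2Floor(512 / 32) = 16 below.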
5051 unsigned MaxSafeElements = 5052 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5053 5054 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5055 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5056 5057 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5058 << ".\n"); 5059 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5060 << ".\n"); 5061 5062 // First analyze the UserVF, fall back if the UserVF should be ignored. 5063 if (UserVF) { 5064 auto MaxSafeUserVF = 5065 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5066 5067 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5068 // If `VF=vscale x N` is safe, then so is `VF=N` 5069 if (UserVF.isScalable()) 5070 return FixedScalableVFPair( 5071 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5072 else 5073 return UserVF; 5074 } 5075 5076 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5077 5078 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5079 // is better to ignore the hint and let the compiler choose a suitable VF. 5080 if (!UserVF.isScalable()) { 5081 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5082 << " is unsafe, clamping to max safe VF=" 5083 << MaxSafeFixedVF << ".\n"); 5084 ORE->emit([&]() { 5085 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5086 TheLoop->getStartLoc(), 5087 TheLoop->getHeader()) 5088 << "User-specified vectorization factor " 5089 << ore::NV("UserVectorizationFactor", UserVF) 5090 << " is unsafe, clamping to maximum safe vectorization factor " 5091 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5092 }); 5093 return MaxSafeFixedVF; 5094 } 5095 5096 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5097 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5098 << " is ignored because scalable vectors are not " 5099 "available.\n"); 5100 ORE->emit([&]() { 5101 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5102 TheLoop->getStartLoc(), 5103 TheLoop->getHeader()) 5104 << "User-specified vectorization factor " 5105 << ore::NV("UserVectorizationFactor", UserVF) 5106 << " is ignored because the target does not support scalable " 5107 "vectors. The compiler will pick a more suitable value."; 5108 }); 5109 } else { 5110 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5111 << " is unsafe. Ignoring scalable UserVF.\n"); 5112 ORE->emit([&]() { 5113 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5114 TheLoop->getStartLoc(), 5115 TheLoop->getHeader()) 5116 << "User-specified vectorization factor " 5117 << ore::NV("UserVectorizationFactor", UserVF) 5118 << " is unsafe. 
Ignoring the hint to let the compiler pick a " 5119 "more suitable value."; 5120 }); 5121 } 5122 } 5123 5124 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5125 << " / " << WidestType << " bits.\n"); 5126 5127 FixedScalableVFPair Result(ElementCount::getFixed(1), 5128 ElementCount::getScalable(0)); 5129 if (auto MaxVF = 5130 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5131 MaxSafeFixedVF, FoldTailByMasking)) 5132 Result.FixedVF = MaxVF; 5133 5134 if (auto MaxVF = 5135 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5136 MaxSafeScalableVF, FoldTailByMasking)) 5137 if (MaxVF.isScalable()) { 5138 Result.ScalableVF = MaxVF; 5139 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5140 << "\n"); 5141 } 5142 5143 return Result; 5144 } 5145 5146 FixedScalableVFPair 5147 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5148 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5149 // TODO: It may be useful to do so, since it's still likely to be dynamically 5150 // uniform if the target can skip. 5151 reportVectorizationFailure( 5152 "Not inserting runtime ptr check for divergent target", 5153 "runtime pointer checks needed. Not enabled for divergent target", 5154 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5155 return FixedScalableVFPair::getNone(); 5156 } 5157 5158 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5159 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5160 if (TC == 1) { 5161 reportVectorizationFailure("Single iteration (non) loop", 5162 "loop trip count is one, irrelevant for vectorization", 5163 "SingleIterationLoop", ORE, TheLoop); 5164 return FixedScalableVFPair::getNone(); 5165 } 5166 5167 switch (ScalarEpilogueStatus) { 5168 case CM_ScalarEpilogueAllowed: 5169 return computeFeasibleMaxVF(TC, UserVF, false); 5170 case CM_ScalarEpilogueNotAllowedUsePredicate: 5171 LLVM_FALLTHROUGH; 5172 case CM_ScalarEpilogueNotNeededUsePredicate: 5173 LLVM_DEBUG( 5174 dbgs() << "LV: vector predicate hint/switch found.\n" 5175 << "LV: Not allowing scalar epilogue, creating predicated " 5176 << "vector loop.\n"); 5177 break; 5178 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5179 // fallthrough as a special case of OptForSize 5180 case CM_ScalarEpilogueNotAllowedOptSize: 5181 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5182 LLVM_DEBUG( 5183 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5184 else 5185 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5186 << "count.\n"); 5187 5188 // Bail if runtime checks are required, which are not good when optimizing 5189 // for size. 5190 if (runtimeChecksRequired()) 5191 return FixedScalableVFPair::getNone(); 5192 5193 break; 5194 } 5195 5196 // The only loops we can vectorize without a scalar epilogue are loops with 5197 // a bottom-test and a single exiting block. We'd have to handle the fact 5198 // that not every instruction executes on the last iteration. This will 5199 // require a lane mask which varies through the vector loop body. (TODO) 5200 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5201 // If there was a tail-folding hint/switch, but we can't fold the tail by 5202 // masking, fall back to a vectorization with a scalar epilogue.
5203 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5204 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5205 "scalar epilogue instead.\n"); 5206 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5207 return computeFeasibleMaxVF(TC, UserVF, false); 5208 } 5209 return FixedScalableVFPair::getNone(); 5210 } 5211 5212 // Now try the tail folding 5213 5214 // Invalidate interleave groups that require an epilogue if we can't mask 5215 // the interleave-group. 5216 if (!useMaskedInterleavedAccesses(TTI)) { 5217 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5218 "No decisions should have been taken at this point"); 5219 // Note: There is no need to invalidate any cost modeling decisions here, as 5220 // none were taken so far. 5221 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5222 } 5223 5224 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true); 5225 // Avoid tail folding if the trip count is known to be a multiple of any VF 5226 // we chose. 5227 // FIXME: The condition below pessimizes the case for fixed-width vectors, 5228 // when scalable VFs are also candidates for vectorization. 5229 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5230 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5231 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5232 "MaxFixedVF must be a power of 2"); 5233 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5234 : MaxFixedVF.getFixedValue(); 5235 ScalarEvolution *SE = PSE.getSE(); 5236 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5237 const SCEV *ExitCount = SE->getAddExpr( 5238 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5239 const SCEV *Rem = SE->getURemExpr( 5240 SE->applyLoopGuards(ExitCount, TheLoop), 5241 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5242 if (Rem->isZero()) { 5243 // Accept MaxFixedVF if we do not have a tail. 5244 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5245 return MaxFactors; 5246 } 5247 } 5248 5249 // For scalable vectors, don't use tail folding for low trip counts or when 5250 // optimizing for code size. We only permit this if the user has explicitly 5251 // requested it. 5252 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5253 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5254 MaxFactors.ScalableVF.isVector()) 5255 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5256 5257 // If we don't know the precise trip count, or if the trip count that we 5258 // found modulo the vectorization factor is not zero, try to fold the tail 5259 // by masking. 5260 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5261 if (Legal->prepareToFoldTailByMasking()) { 5262 FoldTailByMasking = true; 5263 return MaxFactors; 5264 } 5265 5266 // If there was a tail-folding hint/switch, but we can't fold the tail by 5267 // masking, fall back to a vectorization with a scalar epilogue.
5268 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5269 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5270 "scalar epilogue instead.\n"); 5271 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5272 return MaxFactors; 5273 } 5274 5275 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5276 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5277 return FixedScalableVFPair::getNone(); 5278 } 5279 5280 if (TC == 0) { 5281 reportVectorizationFailure( 5282 "Unable to calculate the loop count due to complex control flow", 5283 "unable to calculate the loop count due to complex control flow", 5284 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5285 return FixedScalableVFPair::getNone(); 5286 } 5287 5288 reportVectorizationFailure( 5289 "Cannot optimize for size and vectorize at the same time.", 5290 "cannot optimize for size and vectorize at the same time. " 5291 "Enable vectorization of this loop with '#pragma clang loop " 5292 "vectorize(enable)' when compiling with -Os/-Oz", 5293 "NoTailLoopWithOptForSize", ORE, TheLoop); 5294 return FixedScalableVFPair::getNone(); 5295 } 5296 5297 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5298 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5299 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5300 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5301 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5302 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5303 : TargetTransformInfo::RGK_FixedWidthVector); 5304 5305 // Convenience function to return the minimum of two ElementCounts. 5306 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5307 assert((LHS.isScalable() == RHS.isScalable()) && 5308 "Scalable flags must match"); 5309 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5310 }; 5311 5312 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5313 // Note that both WidestRegister and WidestType may not be powers of 2. 5314 auto MaxVectorElementCount = ElementCount::get( 5315 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5316 ComputeScalableMaxVF); 5317 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5318 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5319 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5320 5321 if (!MaxVectorElementCount) { 5322 LLVM_DEBUG(dbgs() << "LV: The target has no " 5323 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5324 << " vector registers.\n"); 5325 return ElementCount::getFixed(1); 5326 } 5327 5328 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5329 if (ConstTripCount && 5330 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5331 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5332 // If the loop trip count (TC) is known at compile time, there is no point 5333 // in choosing a VF greater than TC (as done in the loop below). Select the 5334 // maximum power of two which doesn't exceed TC. 5335 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5336 // when the TC is less than or equal to the known number of lanes.
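// Worked example (hypothetical values, for exposition only): a constant trip count of 10 with MaxVectorElementCount = 16 and no tail folding clamps the chosen VF to PowerOf2Floor(10) = 8 below.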
5337 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5338 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5339 "exceeding the constant trip count: " 5340 << ClampedConstTripCount << "\n"); 5341 return ElementCount::getFixed(ClampedConstTripCount); 5342 } 5343 5344 ElementCount MaxVF = MaxVectorElementCount; 5345 if (TTI.shouldMaximizeVectorBandwidth() || 5346 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5347 auto MaxVectorElementCountMaxBW = ElementCount::get( 5348 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5349 ComputeScalableMaxVF); 5350 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5351 5352 // Collect all viable vectorization factors larger than the default MaxVF 5353 // (i.e. MaxVectorElementCount). 5354 SmallVector<ElementCount, 8> VFs; 5355 for (ElementCount VS = MaxVectorElementCount * 2; 5356 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5357 VFs.push_back(VS); 5358 5359 // For each VF calculate its register usage. 5360 auto RUs = calculateRegisterUsage(VFs); 5361 5362 // Select the largest VF which doesn't require more registers than existing 5363 // ones. 5364 for (int i = RUs.size() - 1; i >= 0; --i) { 5365 bool Selected = true; 5366 for (auto &pair : RUs[i].MaxLocalUsers) { 5367 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5368 if (pair.second > TargetNumRegisters) 5369 Selected = false; 5370 } 5371 if (Selected) { 5372 MaxVF = VFs[i]; 5373 break; 5374 } 5375 } 5376 if (ElementCount MinVF = 5377 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5378 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5379 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5380 << ") with target's minimum: " << MinVF << '\n'); 5381 MaxVF = MinVF; 5382 } 5383 } 5384 } 5385 return MaxVF; 5386 } 5387 5388 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5389 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5390 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5391 auto Min = Attr.getVScaleRangeMin(); 5392 auto Max = Attr.getVScaleRangeMax(); 5393 if (Max && Min == Max) 5394 return Max; 5395 } 5396 5397 return TTI.getVScaleForTuning(); 5398 } 5399 5400 bool LoopVectorizationCostModel::isMoreProfitable( 5401 const VectorizationFactor &A, const VectorizationFactor &B) const { 5402 InstructionCost CostA = A.Cost; 5403 InstructionCost CostB = B.Cost; 5404 5405 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5406 5407 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5408 MaxTripCount) { 5409 // If we are folding the tail and the trip count is a known (possibly small) 5410 // constant, the trip count will be rounded up to an integer number of 5411 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5412 // which we compare directly. When not folding the tail, the total cost will 5413 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5414 // approximated with the per-lane cost below instead of using the tripcount 5415 // as here. 5416 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5417 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5418 return RTCostA < RTCostB; 5419 } 5420 5421 // Improve estimate for the vector width if it is scalable. 
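// Worked example (hypothetical values, for exposition only): comparing A.Width = vscale x 4 against B.Width = 8 with a tuning vscale of 2 treats A as an effective width of 8 in the per-lane comparison below.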
5422 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5423 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5424 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5425 if (A.Width.isScalable()) 5426 EstimatedWidthA *= VScale.getValue(); 5427 if (B.Width.isScalable()) 5428 EstimatedWidthB *= VScale.getValue(); 5429 } 5430 5431 // Assume vscale may be larger than 1 (or the value being tuned for), 5432 // so that scalable vectorization is slightly favorable over fixed-width 5433 // vectorization. 5434 if (A.Width.isScalable() && !B.Width.isScalable()) 5435 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5436 5437 // To avoid the need for FP division: 5438 // (CostA / A.Width) < (CostB / B.Width) 5439 // <=> (CostA * B.Width) < (CostB * A.Width) 5440 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5441 } 5442 5443 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5444 const ElementCountSet &VFCandidates) { 5445 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5446 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5447 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5448 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5449 "Expected Scalar VF to be a candidate"); 5450 5451 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5452 VectorizationFactor ChosenFactor = ScalarCost; 5453 5454 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5455 if (ForceVectorization && VFCandidates.size() > 1) { 5456 // Ignore scalar width, because the user explicitly wants vectorization. 5457 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5458 // evaluation. 5459 ChosenFactor.Cost = InstructionCost::getMax(); 5460 } 5461 5462 SmallVector<InstructionVFPair> InvalidCosts; 5463 for (const auto &i : VFCandidates) { 5464 // The cost for scalar VF=1 is already calculated, so ignore it. 5465 if (i.isScalar()) 5466 continue; 5467 5468 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5469 VectorizationFactor Candidate(i, C.first); 5470 5471 #ifndef NDEBUG 5472 unsigned AssumedMinimumVscale = 1; 5473 if (Optional<unsigned> VScale = getVScaleForTuning()) 5474 AssumedMinimumVscale = VScale.getValue(); 5475 unsigned Width = 5476 Candidate.Width.isScalable() 5477 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5478 : Candidate.Width.getFixedValue(); 5479 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5480 << " costs: " << (Candidate.Cost / Width)); 5481 if (i.isScalable()) 5482 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5483 << AssumedMinimumVscale << ")"); 5484 LLVM_DEBUG(dbgs() << ".\n"); 5485 #endif 5486 5487 if (!C.second && !ForceVectorization) { 5488 LLVM_DEBUG( 5489 dbgs() << "LV: Not considering vector loop of width " << i 5490 << " because it will not generate any vector instructions.\n"); 5491 continue; 5492 } 5493 5494 // If profitable add it to ProfitableVF list. 5495 if (isMoreProfitable(Candidate, ScalarCost)) 5496 ProfitableVFs.push_back(Candidate); 5497 5498 if (isMoreProfitable(Candidate, ChosenFactor)) 5499 ChosenFactor = Candidate; 5500 } 5501 5502 // Emit a report of VFs with invalid costs in the loop. 5503 if (!InvalidCosts.empty()) { 5504 // Group the remarks per instruction, keeping the instruction order from 5505 // InvalidCosts. 
5506 std::map<Instruction *, unsigned> Numbering; 5507 unsigned I = 0; 5508 for (auto &Pair : InvalidCosts) 5509 if (!Numbering.count(Pair.first)) 5510 Numbering[Pair.first] = I++; 5511 5512 // Sort the list, first on instruction(number) then on VF. 5513 llvm::sort(InvalidCosts, 5514 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5515 if (Numbering[A.first] != Numbering[B.first]) 5516 return Numbering[A.first] < Numbering[B.first]; 5517 ElementCountComparator ECC; 5518 return ECC(A.second, B.second); 5519 }); 5520 5521 // For a list of ordered instruction-vf pairs: 5522 // [(load, vf1), (load, vf2), (store, vf1)] 5523 // Group the instructions together to emit separate remarks for: 5524 // load (vf1, vf2) 5525 // store (vf1) 5526 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5527 auto Subset = ArrayRef<InstructionVFPair>(); 5528 do { 5529 if (Subset.empty()) 5530 Subset = Tail.take_front(1); 5531 5532 Instruction *I = Subset.front().first; 5533 5534 // If the next instruction is different, or if there are no other pairs, 5535 // emit a remark for the collated subset. e.g. 5536 // [(load, vf1), (load, vf2))] 5537 // to emit: 5538 // remark: invalid costs for 'load' at VF=(vf, vf2) 5539 if (Subset == Tail || Tail[Subset.size()].first != I) { 5540 std::string OutString; 5541 raw_string_ostream OS(OutString); 5542 assert(!Subset.empty() && "Unexpected empty range"); 5543 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5544 for (auto &Pair : Subset) 5545 OS << (Pair.second == Subset.front().second ? "" : ", ") 5546 << Pair.second; 5547 OS << "):"; 5548 if (auto *CI = dyn_cast<CallInst>(I)) 5549 OS << " call to " << CI->getCalledFunction()->getName(); 5550 else 5551 OS << " " << I->getOpcodeName(); 5552 OS.flush(); 5553 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5554 Tail = Tail.drop_front(Subset.size()); 5555 Subset = {}; 5556 } else 5557 // Grow the subset by one element 5558 Subset = Tail.take_front(Subset.size() + 1); 5559 } while (!Tail.empty()); 5560 } 5561 5562 if (!EnableCondStoresVectorization && NumPredStores) { 5563 reportVectorizationFailure("There are conditional stores.", 5564 "store that is conditionally executed prevents vectorization", 5565 "ConditionalStore", ORE, TheLoop); 5566 ChosenFactor = ScalarCost; 5567 } 5568 5569 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5570 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5571 << "LV: Vectorization seems to be not beneficial, " 5572 << "but was forced by a user.\n"); 5573 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5574 return ChosenFactor; 5575 } 5576 5577 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5578 const Loop &L, ElementCount VF) const { 5579 // Cross iteration phis such as reductions need special handling and are 5580 // currently unsupported. 5581 if (any_of(L.getHeader()->phis(), 5582 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5583 return false; 5584 5585 // Phis with uses outside of the loop require special handling and are 5586 // currently unsupported. 5587 for (auto &Entry : Legal->getInductionVars()) { 5588 // Look for uses of the value of the induction at the last iteration. 5589 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5590 for (User *U : PostInc->users()) 5591 if (!L.contains(cast<Instruction>(U))) 5592 return false; 5593 // Look for uses of penultimate value of the induction. 
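// The induction phi itself carries the pre-increment value of each iteration,
// so a use of it outside the loop observes the value of the penultimate
// iteration.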
5594 for (User *U : Entry.first->users())
5595 if (!L.contains(cast<Instruction>(U)))
5596 return false;
5597 }
5598
5599 // Induction variables that are widened require special handling that is
5600 // currently not supported.
5601 if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5602 return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5603 this->isProfitableToScalarize(Entry.first, VF));
5604 }))
5605 return false;
5606
5607 // Epilogue vectorization code has not been audited to ensure it handles
5608 // non-latch exits properly. It may be fine, but it needs to be audited and
5609 // tested.
5610 if (L.getExitingBlock() != L.getLoopLatch())
5611 return false;
5612
5613 return true;
5614 }
5615
5616 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5617 const ElementCount VF) const {
5618 // FIXME: We need a much better cost-model to take different parameters such
5619 // as register pressure, code size increase and cost of extra branches into
5620 // account. For now we apply a very crude heuristic and only consider loops
5621 // with vectorization factors larger than a certain value.
5622 // We also consider epilogue vectorization unprofitable for targets that don't
5623 // consider interleaving beneficial (e.g. MVE).
5624 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5625 return false;
5626 // FIXME: We should consider changing the threshold for scalable
5627 // vectors to take VScaleForTuning into account.
5628 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5629 return true;
5630 return false;
5631 }
5632
5633 VectorizationFactor
5634 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5635 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5636 VectorizationFactor Result = VectorizationFactor::Disabled();
5637 if (!EnableEpilogueVectorization) {
5638 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5639 return Result;
5640 }
5641
5642 if (!isScalarEpilogueAllowed()) {
5643 LLVM_DEBUG(
5644 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5645 "allowed.\n";);
5646 return Result;
5647 }
5648
5649 // Not really a cost consideration, but check for unsupported cases here to
5650 // simplify the logic.
5651 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5652 LLVM_DEBUG(
5653 dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5654 "not a supported candidate.\n";);
5655 return Result;
5656 }
5657
5658 if (EpilogueVectorizationForceVF > 1) {
5659 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5660 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5661 if (LVP.hasPlanWithVF(ForcedEC))
5662 return {ForcedEC, 0};
5663 else {
5664 LLVM_DEBUG(
5665 dbgs()
5666 << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5667 return Result;
5668 }
5669 }
5670
5671 if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5672 TheLoop->getHeader()->getParent()->hasMinSize()) {
5673 LLVM_DEBUG(
5674 dbgs()
5675 << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5676 return Result;
5677 }
5678
5679 if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5680 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5681 "this loop\n");
5682 return Result;
5683 }
5684
5685 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5686 // the main loop handles 8 lanes per iteration.
We could still benefit from 5687 // vectorizing the epilogue loop with VF=4. 5688 ElementCount EstimatedRuntimeVF = MainLoopVF; 5689 if (MainLoopVF.isScalable()) { 5690 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5691 if (Optional<unsigned> VScale = getVScaleForTuning()) 5692 EstimatedRuntimeVF *= VScale.getValue(); 5693 } 5694 5695 for (auto &NextVF : ProfitableVFs) 5696 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5697 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5698 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5699 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5700 LVP.hasPlanWithVF(NextVF.Width)) 5701 Result = NextVF; 5702 5703 if (Result != VectorizationFactor::Disabled()) 5704 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5705 << Result.Width << "\n";); 5706 return Result; 5707 } 5708 5709 std::pair<unsigned, unsigned> 5710 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5711 unsigned MinWidth = -1U; 5712 unsigned MaxWidth = 8; 5713 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5714 // For in-loop reductions, no element types are added to ElementTypesInLoop 5715 // if there are no loads/stores in the loop. In this case, check through the 5716 // reduction variables to determine the maximum width. 5717 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5718 // Reset MaxWidth so that we can find the smallest type used by recurrences 5719 // in the loop. 5720 MaxWidth = -1U; 5721 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5722 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5723 // When finding the min width used by the recurrence we need to account 5724 // for casts on the input operands of the recurrence. 5725 MaxWidth = std::min<unsigned>( 5726 MaxWidth, std::min<unsigned>( 5727 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5728 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5729 } 5730 } else { 5731 for (Type *T : ElementTypesInLoop) { 5732 MinWidth = std::min<unsigned>( 5733 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5734 MaxWidth = std::max<unsigned>( 5735 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5736 } 5737 } 5738 return {MinWidth, MaxWidth}; 5739 } 5740 5741 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5742 ElementTypesInLoop.clear(); 5743 // For each block. 5744 for (BasicBlock *BB : TheLoop->blocks()) { 5745 // For each instruction in the loop. 5746 for (Instruction &I : BB->instructionsWithoutDebug()) { 5747 Type *T = I.getType(); 5748 5749 // Skip ignored values. 5750 if (ValuesToIgnore.count(&I)) 5751 continue; 5752 5753 // Only examine Loads, Stores and PHINodes. 5754 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5755 continue; 5756 5757 // Examine PHI nodes that are reduction variables. Update the type to 5758 // account for the recurrence type. 5759 if (auto *PN = dyn_cast<PHINode>(&I)) { 5760 if (!Legal->isReductionVariable(PN)) 5761 continue; 5762 const RecurrenceDescriptor &RdxDesc = 5763 Legal->getReductionVars().find(PN)->second; 5764 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5765 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5766 RdxDesc.getRecurrenceType(), 5767 TargetTransformInfo::ReductionFlags())) 5768 continue; 5769 T = RdxDesc.getRecurrenceType(); 5770 } 5771 5772 // Examine the stored values. 
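// For stores the element type of interest is that of the stored value; the
// store instruction itself has void type.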
5773 if (auto *ST = dyn_cast<StoreInst>(&I)) 5774 T = ST->getValueOperand()->getType(); 5775 5776 assert(T->isSized() && 5777 "Expected the load/store/recurrence type to be sized"); 5778 5779 ElementTypesInLoop.insert(T); 5780 } 5781 } 5782 } 5783 5784 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5785 unsigned LoopCost) { 5786 // -- The interleave heuristics -- 5787 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5788 // There are many micro-architectural considerations that we can't predict 5789 // at this level. For example, frontend pressure (on decode or fetch) due to 5790 // code size, or the number and capabilities of the execution ports. 5791 // 5792 // We use the following heuristics to select the interleave count: 5793 // 1. If the code has reductions, then we interleave to break the cross 5794 // iteration dependency. 5795 // 2. If the loop is really small, then we interleave to reduce the loop 5796 // overhead. 5797 // 3. We don't interleave if we think that we will spill registers to memory 5798 // due to the increased register pressure. 5799 5800 if (!isScalarEpilogueAllowed()) 5801 return 1; 5802 5803 // We used the distance for the interleave count. 5804 if (Legal->getMaxSafeDepDistBytes() != -1U) 5805 return 1; 5806 5807 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5808 const bool HasReductions = !Legal->getReductionVars().empty(); 5809 // Do not interleave loops with a relatively small known or estimated trip 5810 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 5811 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5812 // because with the above conditions interleaving can expose ILP and break 5813 // cross iteration dependences for reductions. 5814 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5815 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5816 return 1; 5817 5818 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5819 // We divide by these constants so assume that we have at least one 5820 // instruction that uses at least one register. 5821 for (auto& pair : R.MaxLocalUsers) { 5822 pair.second = std::max(pair.second, 1U); 5823 } 5824 5825 // We calculate the interleave count using the following formula. 5826 // Subtract the number of loop invariants from the number of available 5827 // registers. These registers are used by all of the interleaved instances. 5828 // Next, divide the remaining registers by the number of registers that is 5829 // required by the loop, in order to estimate how many parallel instances 5830 // fit without causing spills. All of this is rounded down if necessary to be 5831 // a power of two. We want power of two interleave count to simplify any 5832 // addressing operations or alignment considerations. 5833 // We also want power of two interleave counts to ensure that the induction 5834 // variable of the vector loop wraps to zero, when tail is folded by masking; 5835 // this currently happens when OptForSize, in which case IC is set to 1 above. 
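// Illustrative example (register counts are hypothetical): with 32 registers
// in a class, 2 of them held by loop-invariant values and a maximum local
// usage of 6, the estimate is PowerOf2Floor((32 - 2) / 6) = 4 interleaved
// instances.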
5836 unsigned IC = UINT_MAX; 5837 5838 for (auto& pair : R.MaxLocalUsers) { 5839 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5840 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5841 << " registers of " 5842 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5843 if (VF.isScalar()) { 5844 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5845 TargetNumRegisters = ForceTargetNumScalarRegs; 5846 } else { 5847 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5848 TargetNumRegisters = ForceTargetNumVectorRegs; 5849 } 5850 unsigned MaxLocalUsers = pair.second; 5851 unsigned LoopInvariantRegs = 0; 5852 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5853 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5854 5855 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5856 // Don't count the induction variable as interleaved. 5857 if (EnableIndVarRegisterHeur) { 5858 TmpIC = 5859 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5860 std::max(1U, (MaxLocalUsers - 1))); 5861 } 5862 5863 IC = std::min(IC, TmpIC); 5864 } 5865 5866 // Clamp the interleave ranges to reasonable counts. 5867 unsigned MaxInterleaveCount = 5868 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5869 5870 // Check if the user has overridden the max. 5871 if (VF.isScalar()) { 5872 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5873 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5874 } else { 5875 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5876 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5877 } 5878 5879 // If trip count is known or estimated compile time constant, limit the 5880 // interleave count to be less than the trip count divided by VF, provided it 5881 // is at least 1. 5882 // 5883 // For scalable vectors we can't know if interleaving is beneficial. It may 5884 // not be beneficial for small loops if none of the lanes in the second vector 5885 // iterations is enabled. However, for larger loops, there is likely to be a 5886 // similar benefit as for fixed-width vectors. For now, we choose to leave 5887 // the InterleaveCount as if vscale is '1', although if some information about 5888 // the vector is known (e.g. min vector size), we can make a better decision. 5889 if (BestKnownTC) { 5890 MaxInterleaveCount = 5891 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5892 // Make sure MaxInterleaveCount is greater than 0. 5893 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5894 } 5895 5896 assert(MaxInterleaveCount > 0 && 5897 "Maximum interleave count must be greater than 0"); 5898 5899 // Clamp the calculated IC to be between the 1 and the max interleave count 5900 // that the target and trip count allows. 5901 if (IC > MaxInterleaveCount) 5902 IC = MaxInterleaveCount; 5903 else 5904 // Make sure IC is greater than 0. 5905 IC = std::max(1u, IC); 5906 5907 assert(IC > 0 && "Interleave count must be greater than 0."); 5908 5909 // If we did not calculate the cost for VF (because the user selected the VF) 5910 // then we calculate the cost of VF here. 
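// A LoopCost of 0 is used by the caller as a 'not yet computed' sentinel.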
5911 if (LoopCost == 0) { 5912 InstructionCost C = expectedCost(VF).first; 5913 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5914 LoopCost = *C.getValue(); 5915 } 5916 5917 assert(LoopCost && "Non-zero loop cost expected"); 5918 5919 // Interleave if we vectorized this loop and there is a reduction that could 5920 // benefit from interleaving. 5921 if (VF.isVector() && HasReductions) { 5922 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5923 return IC; 5924 } 5925 5926 // For any scalar loop that either requires runtime checks or predication we 5927 // are better off leaving this to the unroller. Note that if we've already 5928 // vectorized the loop we will have done the runtime check and so interleaving 5929 // won't require further checks. 5930 bool ScalarInterleavingRequiresPredication = 5931 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5932 return Legal->blockNeedsPredication(BB); 5933 })); 5934 bool ScalarInterleavingRequiresRuntimePointerCheck = 5935 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5936 5937 // We want to interleave small loops in order to reduce the loop overhead and 5938 // potentially expose ILP opportunities. 5939 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5940 << "LV: IC is " << IC << '\n' 5941 << "LV: VF is " << VF << '\n'); 5942 const bool AggressivelyInterleaveReductions = 5943 TTI.enableAggressiveInterleaving(HasReductions); 5944 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5945 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5946 // We assume that the cost overhead is 1 and we use the cost model 5947 // to estimate the cost of the loop and interleave until the cost of the 5948 // loop overhead is about 5% of the cost of the loop. 5949 unsigned SmallIC = 5950 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5951 5952 // Interleave until store/load ports (estimated by max interleave count) are 5953 // saturated. 5954 unsigned NumStores = Legal->getNumStores(); 5955 unsigned NumLoads = Legal->getNumLoads(); 5956 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5957 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5958 5959 // There is little point in interleaving for reductions containing selects 5960 // and compares when VF=1 since it may just create more overhead than it's 5961 // worth for loops with small trip counts. This is because we still have to 5962 // do the final reduction after the loop. 5963 bool HasSelectCmpReductions = 5964 HasReductions && 5965 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5966 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5967 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5968 RdxDesc.getRecurrenceKind()); 5969 }); 5970 if (HasSelectCmpReductions) { 5971 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5972 return 1; 5973 } 5974 5975 // If we have a scalar reduction (vector reductions are already dealt with 5976 // by this point), we can increase the critical path length if the loop 5977 // we're interleaving is inside another loop. For tree-wise reductions 5978 // set the limit to 2, and for ordered reductions it's best to disable 5979 // interleaving entirely. 
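// The tree-wise limit is applied below by clamping SmallIC, StoresIC and
// LoadsIC to MaxNestedScalarReductionIC.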
5980 if (HasReductions && TheLoop->getLoopDepth() > 1) {
5981 bool HasOrderedReductions =
5982 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5983 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5984 return RdxDesc.isOrdered();
5985 });
5986 if (HasOrderedReductions) {
5987 LLVM_DEBUG(
5988 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5989 return 1;
5990 }
5991
5992 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5993 SmallIC = std::min(SmallIC, F);
5994 StoresIC = std::min(StoresIC, F);
5995 LoadsIC = std::min(LoadsIC, F);
5996 }
5997
5998 if (EnableLoadStoreRuntimeInterleave &&
5999 std::max(StoresIC, LoadsIC) > SmallIC) {
6000 LLVM_DEBUG(
6001 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6002 return std::max(StoresIC, LoadsIC);
6003 }
6004
6005 // If there are scalar reductions and TTI has enabled aggressive
6006 // interleaving for reductions, we will interleave to expose ILP.
6007 if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6008 AggressivelyInterleaveReductions) {
6009 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6010 // Interleave no less than SmallIC but not as aggressive as the normal IC
6011 // to satisfy the rare situation when resources are too limited.
6012 return std::max(IC / 2, SmallIC);
6013 } else {
6014 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6015 return SmallIC;
6016 }
6017 }
6018
6019 // Interleave if this is a large loop (small loops are already dealt with by
6020 // this point) that could benefit from interleaving.
6021 if (AggressivelyInterleaveReductions) {
6022 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6023 return IC;
6024 }
6025
6026 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6027 return 1;
6028 }
6029
6030 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6031 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6032 // This function calculates the register usage by measuring the highest number
6033 // of values that are alive at a single location. Obviously, this is a very
6034 // rough estimation. We scan the loop in topological order and
6035 // assign a number to each instruction. We use RPO to ensure that defs are
6036 // met before their users. We assume that each instruction that has in-loop
6037 // users starts an interval. We record every time that an in-loop value is
6038 // used, so we have a list of the first and last occurrences of each
6039 // instruction. Next, we transpose this data structure into a multi map that
6040 // holds the list of intervals that *end* at a specific location. This multi
6041 // map allows us to perform a linear search. We scan the instructions linearly
6042 // and record each time that a new interval starts, by placing it in a set.
6043 // If we find this value in the multi-map then we remove it from the set.
6044 // The max register usage is the maximum size of the set.
6045 // We also search for instructions that are defined outside the loop, but are
6046 // used inside the loop. We need this number separately from the max-interval
6047 // usage number because when we unroll, loop-invariant values do not take
6048 // more registers.
6049 LoopBlocksDFS DFS(TheLoop);
6050 DFS.perform(LI);
6051
6052 RegisterUsage RU;
6053
6054 // Each 'key' in the map opens a new interval. The values
6055 // of the map are the index of the 'last seen' usage of the
6056 // instruction that is the key.
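// Example: a value defined at index 3 whose last in-loop use is at index 7
// stays in the open-interval set while the instructions in between are
// scanned; the per-VF register estimate is derived from how many intervals
// are open at once.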
6057 using IntervalMap = DenseMap<Instruction *, unsigned>; 6058 6059 // Maps instruction to its index. 6060 SmallVector<Instruction *, 64> IdxToInstr; 6061 // Marks the end of each interval. 6062 IntervalMap EndPoint; 6063 // Saves the list of instruction indices that are used in the loop. 6064 SmallPtrSet<Instruction *, 8> Ends; 6065 // Saves the list of values that are used in the loop but are 6066 // defined outside the loop, such as arguments and constants. 6067 SmallPtrSet<Value *, 8> LoopInvariants; 6068 6069 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6070 for (Instruction &I : BB->instructionsWithoutDebug()) { 6071 IdxToInstr.push_back(&I); 6072 6073 // Save the end location of each USE. 6074 for (Value *U : I.operands()) { 6075 auto *Instr = dyn_cast<Instruction>(U); 6076 6077 // Ignore non-instruction values such as arguments, constants, etc. 6078 if (!Instr) 6079 continue; 6080 6081 // If this instruction is outside the loop then record it and continue. 6082 if (!TheLoop->contains(Instr)) { 6083 LoopInvariants.insert(Instr); 6084 continue; 6085 } 6086 6087 // Overwrite previous end points. 6088 EndPoint[Instr] = IdxToInstr.size(); 6089 Ends.insert(Instr); 6090 } 6091 } 6092 } 6093 6094 // Saves the list of intervals that end with the index in 'key'. 6095 using InstrList = SmallVector<Instruction *, 2>; 6096 DenseMap<unsigned, InstrList> TransposeEnds; 6097 6098 // Transpose the EndPoints to a list of values that end at each index. 6099 for (auto &Interval : EndPoint) 6100 TransposeEnds[Interval.second].push_back(Interval.first); 6101 6102 SmallPtrSet<Instruction *, 8> OpenIntervals; 6103 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6104 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6105 6106 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6107 6108 // A lambda that gets the register usage for the given type and VF. 6109 const auto &TTICapture = TTI; 6110 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6111 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6112 return 0; 6113 InstructionCost::CostType RegUsage = 6114 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6115 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6116 "Nonsensical values for register usage."); 6117 return RegUsage; 6118 }; 6119 6120 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6121 Instruction *I = IdxToInstr[i]; 6122 6123 // Remove all of the instructions that end at this location. 6124 InstrList &List = TransposeEnds[i]; 6125 for (Instruction *ToRemove : List) 6126 OpenIntervals.erase(ToRemove); 6127 6128 // Ignore instructions that are never used within the loop. 6129 if (!Ends.count(I)) 6130 continue; 6131 6132 // Skip ignored values. 6133 if (ValuesToIgnore.count(I)) 6134 continue; 6135 6136 // For each VF find the maximum usage of registers. 6137 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6138 // Count the number of live intervals. 6139 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6140 6141 if (VFs[j].isScalar()) { 6142 for (auto Inst : OpenIntervals) { 6143 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6144 if (RegUsage.find(ClassID) == RegUsage.end()) 6145 RegUsage[ClassID] = 1; 6146 else 6147 RegUsage[ClassID] += 1; 6148 } 6149 } else { 6150 collectUniformsAndScalars(VFs[j]); 6151 for (auto Inst : OpenIntervals) { 6152 // Skip ignored values for VF > 1. 
6153 if (VecValuesToIgnore.count(Inst)) 6154 continue; 6155 if (isScalarAfterVectorization(Inst, VFs[j])) { 6156 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6157 if (RegUsage.find(ClassID) == RegUsage.end()) 6158 RegUsage[ClassID] = 1; 6159 else 6160 RegUsage[ClassID] += 1; 6161 } else { 6162 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6163 if (RegUsage.find(ClassID) == RegUsage.end()) 6164 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6165 else 6166 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6167 } 6168 } 6169 } 6170 6171 for (auto& pair : RegUsage) { 6172 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6173 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6174 else 6175 MaxUsages[j][pair.first] = pair.second; 6176 } 6177 } 6178 6179 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6180 << OpenIntervals.size() << '\n'); 6181 6182 // Add the current instruction to the list of open intervals. 6183 OpenIntervals.insert(I); 6184 } 6185 6186 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6187 SmallMapVector<unsigned, unsigned, 4> Invariant; 6188 6189 for (auto Inst : LoopInvariants) { 6190 unsigned Usage = 6191 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6192 unsigned ClassID = 6193 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6194 if (Invariant.find(ClassID) == Invariant.end()) 6195 Invariant[ClassID] = Usage; 6196 else 6197 Invariant[ClassID] += Usage; 6198 } 6199 6200 LLVM_DEBUG({ 6201 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6202 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6203 << " item\n"; 6204 for (const auto &pair : MaxUsages[i]) { 6205 dbgs() << "LV(REG): RegisterClass: " 6206 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6207 << " registers\n"; 6208 } 6209 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6210 << " item\n"; 6211 for (const auto &pair : Invariant) { 6212 dbgs() << "LV(REG): RegisterClass: " 6213 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6214 << " registers\n"; 6215 } 6216 }); 6217 6218 RU.LoopInvariantRegs = Invariant; 6219 RU.MaxLocalUsers = MaxUsages[i]; 6220 RUs[i] = RU; 6221 } 6222 6223 return RUs; 6224 } 6225 6226 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6227 ElementCount VF) { 6228 // TODO: Cost model for emulated masked load/store is completely 6229 // broken. This hack guides the cost model to use an artificially 6230 // high enough value to practically disable vectorization with such 6231 // operations, except where previously deployed legality hack allowed 6232 // using very low cost values. This is to avoid regressions coming simply 6233 // from moving "masked load/store" check from legality to cost model. 6234 // Masked Load/Gather emulation was previously never allowed. 6235 // Limited number of Masked Store/Scatter emulation was allowed. 6236 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6237 return isa<LoadInst>(I) || 6238 (isa<StoreInst>(I) && 6239 NumPredStores > NumberOfStoresToPredicate); 6240 } 6241 6242 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6243 // If we aren't vectorizing the loop, or if we've already collected the 6244 // instructions to scalarize, there's nothing to do. 
Collection may already
6245 // have occurred if we have a user-selected VF and are now computing the
6246 // expected cost for interleaving.
6247 if (VF.isScalar() || VF.isZero() ||
6248 InstsToScalarize.find(VF) != InstsToScalarize.end())
6249 return;
6250
6251 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6252 // not profitable to scalarize any instructions, the presence of VF in the
6253 // map will indicate that we've analyzed it already.
6254 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6255
6256 // Find all the instructions that are scalar with predication in the loop and
6257 // determine if it would be better to not if-convert the blocks they are in.
6258 // If so, we also record the instructions to scalarize.
6259 for (BasicBlock *BB : TheLoop->blocks()) {
6260 if (!blockNeedsPredicationForAnyReason(BB))
6261 continue;
6262 for (Instruction &I : *BB)
6263 if (isScalarWithPredication(&I, VF)) {
6264 ScalarCostsTy ScalarCosts;
6265 // Do not apply discount if scalable, because that would lead to
6266 // invalid scalarization costs.
6267 // Do not apply discount logic if hacked cost is needed
6268 // for emulated masked memrefs.
6269 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6270 computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6271 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6272 // Remember that BB will remain after vectorization.
6273 PredicatedBBsAfterVectorization.insert(BB);
6274 }
6275 }
6276 }
6277
6278 int LoopVectorizationCostModel::computePredInstDiscount(
6279 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6280 assert(!isUniformAfterVectorization(PredInst, VF) &&
6281 "Instruction marked uniform-after-vectorization will be predicated");
6282
6283 // Initialize the discount to zero, meaning that the scalar version and the
6284 // vector version cost the same.
6285 InstructionCost Discount = 0;
6286
6287 // Holds instructions to analyze. The instructions we visit are mapped in
6288 // ScalarCosts. Those instructions are the ones that would be scalarized if
6289 // we find that the scalar version costs less.
6290 SmallVector<Instruction *, 8> Worklist;
6291
6292 // Returns true if the given instruction can be scalarized.
6293 auto canBeScalarized = [&](Instruction *I) -> bool {
6294 // We only attempt to scalarize instructions forming a single-use chain
6295 // from the original predicated block that would otherwise be vectorized.
6296 // Although not strictly necessary, we give up on instructions we know will
6297 // already be scalar to avoid traversing chains that are unlikely to be
6298 // beneficial.
6299 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6300 isScalarAfterVectorization(I, VF))
6301 return false;
6302
6303 // If the instruction is scalar with predication, it will be analyzed
6304 // separately. We ignore it within the context of PredInst.
6305 if (isScalarWithPredication(I, VF))
6306 return false;
6307
6308 // If any of the instruction's operands are uniform after vectorization,
6309 // the instruction cannot be scalarized. This prevents, for example, a
6310 // masked load from being scalarized.
6311 //
6312 // We assume we will only emit a value for lane zero of an instruction
6313 // marked uniform after vectorization, rather than VF identical values.
6314 // Thus, if we scalarize an instruction that uses a uniform, we would
6315 // create uses of values corresponding to the lanes we aren't emitting code
6316 // for.
This behavior can be changed by allowing getScalarValue to clone 6317 // the lane zero values for uniforms rather than asserting. 6318 for (Use &U : I->operands()) 6319 if (auto *J = dyn_cast<Instruction>(U.get())) 6320 if (isUniformAfterVectorization(J, VF)) 6321 return false; 6322 6323 // Otherwise, we can scalarize the instruction. 6324 return true; 6325 }; 6326 6327 // Compute the expected cost discount from scalarizing the entire expression 6328 // feeding the predicated instruction. We currently only consider expressions 6329 // that are single-use instruction chains. 6330 Worklist.push_back(PredInst); 6331 while (!Worklist.empty()) { 6332 Instruction *I = Worklist.pop_back_val(); 6333 6334 // If we've already analyzed the instruction, there's nothing to do. 6335 if (ScalarCosts.find(I) != ScalarCosts.end()) 6336 continue; 6337 6338 // Compute the cost of the vector instruction. Note that this cost already 6339 // includes the scalarization overhead of the predicated instruction. 6340 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6341 6342 // Compute the cost of the scalarized instruction. This cost is the cost of 6343 // the instruction as if it wasn't if-converted and instead remained in the 6344 // predicated block. We will scale this cost by block probability after 6345 // computing the scalarization overhead. 6346 InstructionCost ScalarCost = 6347 VF.getFixedValue() * 6348 getInstructionCost(I, ElementCount::getFixed(1)).first; 6349 6350 // Compute the scalarization overhead of needed insertelement instructions 6351 // and phi nodes. 6352 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6353 ScalarCost += TTI.getScalarizationOverhead( 6354 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6355 APInt::getAllOnes(VF.getFixedValue()), true, false); 6356 ScalarCost += 6357 VF.getFixedValue() * 6358 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6359 } 6360 6361 // Compute the scalarization overhead of needed extractelement 6362 // instructions. For each of the instruction's operands, if the operand can 6363 // be scalarized, add it to the worklist; otherwise, account for the 6364 // overhead. 6365 for (Use &U : I->operands()) 6366 if (auto *J = dyn_cast<Instruction>(U.get())) { 6367 assert(VectorType::isValidElementType(J->getType()) && 6368 "Instruction has non-scalar type"); 6369 if (canBeScalarized(J)) 6370 Worklist.push_back(J); 6371 else if (needsExtract(J, VF)) { 6372 ScalarCost += TTI.getScalarizationOverhead( 6373 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6374 APInt::getAllOnes(VF.getFixedValue()), false, true); 6375 } 6376 } 6377 6378 // Scale the total scalar cost by block probability. 6379 ScalarCost /= getReciprocalPredBlockProb(); 6380 6381 // Compute the discount. A non-negative discount means the vector version 6382 // of the instruction costs more, and scalarizing would be beneficial. 6383 Discount += VectorCost - ScalarCost; 6384 ScalarCosts[I] = ScalarCost; 6385 } 6386 6387 return *Discount.getValue(); 6388 } 6389 6390 LoopVectorizationCostModel::VectorizationCostTy 6391 LoopVectorizationCostModel::expectedCost( 6392 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6393 VectorizationCostTy Cost; 6394 6395 // For each block. 6396 for (BasicBlock *BB : TheLoop->blocks()) { 6397 VectorizationCostTy BlockCost; 6398 6399 // For each instruction in the old loop. 6400 for (Instruction &I : BB->instructionsWithoutDebug()) { 6401 // Skip ignored values. 
6402 if (ValuesToIgnore.count(&I) || 6403 (VF.isVector() && VecValuesToIgnore.count(&I))) 6404 continue; 6405 6406 VectorizationCostTy C = getInstructionCost(&I, VF); 6407 6408 // Check if we should override the cost. 6409 if (C.first.isValid() && 6410 ForceTargetInstructionCost.getNumOccurrences() > 0) 6411 C.first = InstructionCost(ForceTargetInstructionCost); 6412 6413 // Keep a list of instructions with invalid costs. 6414 if (Invalid && !C.first.isValid()) 6415 Invalid->emplace_back(&I, VF); 6416 6417 BlockCost.first += C.first; 6418 BlockCost.second |= C.second; 6419 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6420 << " for VF " << VF << " For instruction: " << I 6421 << '\n'); 6422 } 6423 6424 // If we are vectorizing a predicated block, it will have been 6425 // if-converted. This means that the block's instructions (aside from 6426 // stores and instructions that may divide by zero) will now be 6427 // unconditionally executed. For the scalar case, we may not always execute 6428 // the predicated block, if it is an if-else block. Thus, scale the block's 6429 // cost by the probability of executing it. blockNeedsPredication from 6430 // Legal is used so as to not include all blocks in tail folded loops. 6431 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6432 BlockCost.first /= getReciprocalPredBlockProb(); 6433 6434 Cost.first += BlockCost.first; 6435 Cost.second |= BlockCost.second; 6436 } 6437 6438 return Cost; 6439 } 6440 6441 /// Gets Address Access SCEV after verifying that the access pattern 6442 /// is loop invariant except the induction variable dependence. 6443 /// 6444 /// This SCEV can be sent to the Target in order to estimate the address 6445 /// calculation cost. 6446 static const SCEV *getAddressAccessSCEV( 6447 Value *Ptr, 6448 LoopVectorizationLegality *Legal, 6449 PredicatedScalarEvolution &PSE, 6450 const Loop *TheLoop) { 6451 6452 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6453 if (!Gep) 6454 return nullptr; 6455 6456 // We are looking for a gep with all loop invariant indices except for one 6457 // which should be an induction variable. 6458 auto SE = PSE.getSE(); 6459 unsigned NumOperands = Gep->getNumOperands(); 6460 for (unsigned i = 1; i < NumOperands; ++i) { 6461 Value *Opd = Gep->getOperand(i); 6462 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6463 !Legal->isInductionVariable(Opd)) 6464 return nullptr; 6465 } 6466 6467 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6468 return PSE.getSCEV(Ptr); 6469 } 6470 6471 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6472 return Legal->hasStride(I->getOperand(0)) || 6473 Legal->hasStride(I->getOperand(1)); 6474 } 6475 6476 InstructionCost 6477 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6478 ElementCount VF) { 6479 assert(VF.isVector() && 6480 "Scalarization cost of instruction implies vectorization."); 6481 if (VF.isScalable()) 6482 return InstructionCost::getInvalid(); 6483 6484 Type *ValTy = getLoadStoreType(I); 6485 auto SE = PSE.getSE(); 6486 6487 unsigned AS = getLoadStoreAddressSpace(I); 6488 Value *Ptr = getLoadStorePointerOperand(I); 6489 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6490 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6491 // that it is being called from this specific place. 
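// The scalarized estimate below charges, per lane, an address computation plus
// a scalar memory op, adds insert/extract overhead, and for predicated
// accesses scales the total down by the probability of the block executing.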
6492 6493 // Figure out whether the access is strided and get the stride value 6494 // if it's known in compile time 6495 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6496 6497 // Get the cost of the scalar memory instruction and address computation. 6498 InstructionCost Cost = 6499 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6500 6501 // Don't pass *I here, since it is scalar but will actually be part of a 6502 // vectorized loop where the user of it is a vectorized instruction. 6503 const Align Alignment = getLoadStoreAlignment(I); 6504 Cost += VF.getKnownMinValue() * 6505 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6506 AS, TTI::TCK_RecipThroughput); 6507 6508 // Get the overhead of the extractelement and insertelement instructions 6509 // we might create due to scalarization. 6510 Cost += getScalarizationOverhead(I, VF); 6511 6512 // If we have a predicated load/store, it will need extra i1 extracts and 6513 // conditional branches, but may not be executed for each vector lane. Scale 6514 // the cost by the probability of executing the predicated block. 6515 if (isPredicatedInst(I, VF)) { 6516 Cost /= getReciprocalPredBlockProb(); 6517 6518 // Add the cost of an i1 extract and a branch 6519 auto *Vec_i1Ty = 6520 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6521 Cost += TTI.getScalarizationOverhead( 6522 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6523 /*Insert=*/false, /*Extract=*/true); 6524 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6525 6526 if (useEmulatedMaskMemRefHack(I, VF)) 6527 // Artificially setting to a high enough value to practically disable 6528 // vectorization with such operations. 6529 Cost = 3000000; 6530 } 6531 6532 return Cost; 6533 } 6534 6535 InstructionCost 6536 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6537 ElementCount VF) { 6538 Type *ValTy = getLoadStoreType(I); 6539 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6540 Value *Ptr = getLoadStorePointerOperand(I); 6541 unsigned AS = getLoadStoreAddressSpace(I); 6542 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6543 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6544 6545 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6546 "Stride should be 1 or -1 for consecutive memory access"); 6547 const Align Alignment = getLoadStoreAlignment(I); 6548 InstructionCost Cost = 0; 6549 if (Legal->isMaskRequired(I)) 6550 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6551 CostKind); 6552 else 6553 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6554 CostKind, I); 6555 6556 bool Reverse = ConsecutiveStride < 0; 6557 if (Reverse) 6558 Cost += 6559 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6560 return Cost; 6561 } 6562 6563 InstructionCost 6564 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6565 ElementCount VF) { 6566 assert(Legal->isUniformMemOp(*I)); 6567 6568 Type *ValTy = getLoadStoreType(I); 6569 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6570 const Align Alignment = getLoadStoreAlignment(I); 6571 unsigned AS = getLoadStoreAddressSpace(I); 6572 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6573 if (isa<LoadInst>(I)) { 6574 return TTI.getAddressComputationCost(ValTy) + 6575 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6576 CostKind) + 6577 
TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6578 } 6579 StoreInst *SI = cast<StoreInst>(I); 6580 6581 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6582 return TTI.getAddressComputationCost(ValTy) + 6583 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6584 CostKind) + 6585 (isLoopInvariantStoreValue 6586 ? 0 6587 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6588 VF.getKnownMinValue() - 1)); 6589 } 6590 6591 InstructionCost 6592 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6593 ElementCount VF) { 6594 Type *ValTy = getLoadStoreType(I); 6595 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6596 const Align Alignment = getLoadStoreAlignment(I); 6597 const Value *Ptr = getLoadStorePointerOperand(I); 6598 6599 return TTI.getAddressComputationCost(VectorTy) + 6600 TTI.getGatherScatterOpCost( 6601 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6602 TargetTransformInfo::TCK_RecipThroughput, I); 6603 } 6604 6605 InstructionCost 6606 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6607 ElementCount VF) { 6608 // TODO: Once we have support for interleaving with scalable vectors 6609 // we can calculate the cost properly here. 6610 if (VF.isScalable()) 6611 return InstructionCost::getInvalid(); 6612 6613 Type *ValTy = getLoadStoreType(I); 6614 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6615 unsigned AS = getLoadStoreAddressSpace(I); 6616 6617 auto Group = getInterleavedAccessGroup(I); 6618 assert(Group && "Fail to get an interleaved access group."); 6619 6620 unsigned InterleaveFactor = Group->getFactor(); 6621 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6622 6623 // Holds the indices of existing members in the interleaved group. 6624 SmallVector<unsigned, 4> Indices; 6625 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6626 if (Group->getMember(IF)) 6627 Indices.push_back(IF); 6628 6629 // Calculate the cost of the whole interleaved group. 6630 bool UseMaskForGaps = 6631 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6632 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6633 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6634 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6635 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6636 6637 if (Group->isReverse()) { 6638 // TODO: Add support for reversed masked interleaved access. 6639 assert(!Legal->isMaskRequired(I) && 6640 "Reverse masked interleaved access not supported."); 6641 Cost += 6642 Group->getNumMembers() * 6643 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6644 } 6645 return Cost; 6646 } 6647 6648 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6649 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6650 using namespace llvm::PatternMatch; 6651 // Early exit for no inloop reductions 6652 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6653 return None; 6654 auto *VectorTy = cast<VectorType>(Ty); 6655 6656 // We are looking for a pattern of, and finding the minimal acceptable cost: 6657 // reduce(mul(ext(A), ext(B))) or 6658 // reduce(mul(A, B)) or 6659 // reduce(ext(A)) or 6660 // reduce(A). 6661 // The basic idea is that we walk down the tree to do that, finding the root 6662 // reduction instruction in InLoopReductionImmediateChains. 
From there we find
6663 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6664 // of the components. If the reduction cost is lower, then we return it for the
6665 // reduction instruction and 0 for the other instructions in the pattern. If
6666 // it is not we return an invalid cost specifying the original cost method
6667 // should be used.
6668 Instruction *RetI = I;
6669 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6670 if (!RetI->hasOneUser())
6671 return None;
6672 RetI = RetI->user_back();
6673 }
6674 if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6675 RetI->user_back()->getOpcode() == Instruction::Add) {
6676 if (!RetI->hasOneUser())
6677 return None;
6678 RetI = RetI->user_back();
6679 }
6680
6681 // Test if the found instruction is a reduction, and if not return an invalid
6682 // cost specifying the parent to use the original cost modelling.
6683 if (!InLoopReductionImmediateChains.count(RetI))
6684 return None;
6685
6686 // Find the reduction this chain is a part of and calculate the basic cost of
6687 // the reduction on its own.
6688 Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6689 Instruction *ReductionPhi = LastChain;
6690 while (!isa<PHINode>(ReductionPhi))
6691 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6692
6693 const RecurrenceDescriptor &RdxDesc =
6694 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6695
6696 InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6697 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6698
6699 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6700 // normal fmul instruction to the cost of the fadd reduction.
6701 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6702 BaseCost +=
6703 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6704
6705 // If we're using ordered reductions then we can just return the base cost
6706 // here, since getArithmeticReductionCost calculates the full ordered
6707 // reduction cost when FP reassociation is not allowed.
6708 if (useOrderedReductions(RdxDesc))
6709 return BaseCost;
6710
6711 // Get the operand that was not the reduction chain and match it to one of the
6712 // patterns, returning the better cost if it is found.
6713 Instruction *RedOp = RetI->getOperand(1) == LastChain
6714 ? dyn_cast<Instruction>(RetI->getOperand(0))
6715 : dyn_cast<Instruction>(RetI->getOperand(1));
6716
6717 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6718
6719 Instruction *Op0, *Op1;
6720 if (RedOp &&
6721 match(RedOp,
6722 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6723 match(Op0, m_ZExtOrSExt(m_Value())) &&
6724 Op0->getOpcode() == Op1->getOpcode() &&
6725 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6726 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6727 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6728
6729 // Matched reduce(ext(mul(ext(A), ext(B))))
6730 // Note that the extend opcodes need to all match, or if A==B they will have
6731 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6732 // which is equally fine.
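// Illustrative: for an i32 accumulator fed by sext(i8 A) * sext(i8 B), a
// target with an extending multiply-add reduction (e.g. a dot-product style
// instruction) may report RedCost < ExtCost * 2 + MulCost + Ext2Cost +
// BaseCost, in which case the whole pattern is costed as the reduction alone.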
6733 bool IsUnsigned = isa<ZExtInst>(Op0); 6734 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6735 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6736 6737 InstructionCost ExtCost = 6738 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6739 TTI::CastContextHint::None, CostKind, Op0); 6740 InstructionCost MulCost = 6741 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6742 InstructionCost Ext2Cost = 6743 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6744 TTI::CastContextHint::None, CostKind, RedOp); 6745 6746 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6747 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6748 CostKind); 6749 6750 if (RedCost.isValid() && 6751 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6752 return I == RetI ? RedCost : 0; 6753 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6754 !TheLoop->isLoopInvariant(RedOp)) { 6755 // Matched reduce(ext(A)) 6756 bool IsUnsigned = isa<ZExtInst>(RedOp); 6757 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6758 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6759 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6760 CostKind); 6761 6762 InstructionCost ExtCost = 6763 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6764 TTI::CastContextHint::None, CostKind, RedOp); 6765 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6766 return I == RetI ? RedCost : 0; 6767 } else if (RedOp && 6768 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6769 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6770 Op0->getOpcode() == Op1->getOpcode() && 6771 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6772 bool IsUnsigned = isa<ZExtInst>(Op0); 6773 Type *Op0Ty = Op0->getOperand(0)->getType(); 6774 Type *Op1Ty = Op1->getOperand(0)->getType(); 6775 Type *LargestOpTy = 6776 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6777 : Op0Ty; 6778 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6779 6780 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6781 // different sizes. We take the largest type as the ext to reduce, and add 6782 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6783 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6784 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6785 TTI::CastContextHint::None, CostKind, Op0); 6786 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6787 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6788 TTI::CastContextHint::None, CostKind, Op1); 6789 InstructionCost MulCost = 6790 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6791 6792 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6793 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6794 CostKind); 6795 InstructionCost ExtraExtCost = 0; 6796 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6797 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6798 ExtraExtCost = TTI.getCastInstrCost( 6799 ExtraExtOp->getOpcode(), ExtType, 6800 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6801 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6802 } 6803 6804 if (RedCost.isValid() && 6805 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6806 return I == RetI ? 
RedCost : 0; 6807 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6808 // Matched reduce(mul()) 6809 InstructionCost MulCost = 6810 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6811 6812 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6813 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6814 CostKind); 6815 6816 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6817 return I == RetI ? RedCost : 0; 6818 } 6819 } 6820 6821 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6822 } 6823 6824 InstructionCost 6825 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6826 ElementCount VF) { 6827 // Calculate scalar cost only. Vectorization cost should be ready at this 6828 // moment. 6829 if (VF.isScalar()) { 6830 Type *ValTy = getLoadStoreType(I); 6831 const Align Alignment = getLoadStoreAlignment(I); 6832 unsigned AS = getLoadStoreAddressSpace(I); 6833 6834 return TTI.getAddressComputationCost(ValTy) + 6835 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6836 TTI::TCK_RecipThroughput, I); 6837 } 6838 return getWideningCost(I, VF); 6839 } 6840 6841 LoopVectorizationCostModel::VectorizationCostTy 6842 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6843 ElementCount VF) { 6844 // If we know that this instruction will remain uniform, check the cost of 6845 // the scalar version. 6846 if (isUniformAfterVectorization(I, VF)) 6847 VF = ElementCount::getFixed(1); 6848 6849 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6850 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6851 6852 // Forced scalars do not have any scalarization overhead. 6853 auto ForcedScalar = ForcedScalars.find(VF); 6854 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6855 auto InstSet = ForcedScalar->second; 6856 if (InstSet.count(I)) 6857 return VectorizationCostTy( 6858 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6859 VF.getKnownMinValue()), 6860 false); 6861 } 6862 6863 Type *VectorTy; 6864 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6865 6866 bool TypeNotScalarized = false; 6867 if (VF.isVector() && VectorTy->isVectorTy()) { 6868 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 6869 if (NumParts) 6870 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6871 else 6872 C = InstructionCost::getInvalid(); 6873 } 6874 return VectorizationCostTy(C, TypeNotScalarized); 6875 } 6876 6877 InstructionCost 6878 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6879 ElementCount VF) const { 6880 6881 // There is no mechanism yet to create a scalable scalarization loop, 6882 // so this is currently Invalid. 6883 if (VF.isScalable()) 6884 return InstructionCost::getInvalid(); 6885 6886 if (VF.isScalar()) 6887 return 0; 6888 6889 InstructionCost Cost = 0; 6890 Type *RetTy = ToVectorTy(I->getType(), VF); 6891 if (!RetTy->isVoidTy() && 6892 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6893 Cost += TTI.getScalarizationOverhead( 6894 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6895 false); 6896 6897 // Some targets keep addresses scalar. 6898 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6899 return Cost; 6900 6901 // Some targets support efficient element stores. 6902 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6903 return Cost; 6904 6905 // Collect operands to consider. 6906 CallInst *CI = dyn_cast<CallInst>(I); 6907 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 6908 6909 // Skip operands that do not require extraction/scalarization and do not incur 6910 // any overhead. 6911 SmallVector<Type *> Tys; 6912 for (auto *V : filterExtractingOperands(Ops, VF)) 6913 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6914 return Cost + TTI.getOperandsScalarizationOverhead( 6915 filterExtractingOperands(Ops, VF), Tys); 6916 } 6917 6918 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6919 if (VF.isScalar()) 6920 return; 6921 NumPredStores = 0; 6922 for (BasicBlock *BB : TheLoop->blocks()) { 6923 // For each instruction in the old loop. 6924 for (Instruction &I : *BB) { 6925 Value *Ptr = getLoadStorePointerOperand(&I); 6926 if (!Ptr) 6927 continue; 6928 6929 // TODO: We should generate better code and update the cost model for 6930 // predicated uniform stores. Today they are treated as any other 6931 // predicated store (see added test cases in 6932 // invariant-store-vectorization.ll). 6933 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6934 NumPredStores++; 6935 6936 if (Legal->isUniformMemOp(I)) { 6937 // TODO: Avoid replicating loads and stores instead of 6938 // relying on instcombine to remove them. 6939 // Load: Scalar load + broadcast 6940 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6941 InstructionCost Cost; 6942 if (isa<StoreInst>(&I) && VF.isScalable() && 6943 isLegalGatherOrScatter(&I, VF)) { 6944 Cost = getGatherScatterCost(&I, VF); 6945 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6946 } else { 6947 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 6948 "Cannot yet scalarize uniform stores"); 6949 Cost = getUniformMemOpCost(&I, VF); 6950 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6951 } 6952 continue; 6953 } 6954 6955 // We assume that widening is the best solution when possible. 6956 if (memoryInstructionCanBeWidened(&I, VF)) { 6957 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6958 int ConsecutiveStride = Legal->isConsecutivePtr( 6959 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6960 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6961 "Expected consecutive stride."); 6962 InstWidening Decision = 6963 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6964 setWideningDecision(&I, VF, Decision, Cost); 6965 continue; 6966 } 6967 6968 // Choose between Interleaving, Gather/Scatter or Scalarization. 6969 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6970 unsigned NumAccesses = 1; 6971 if (isAccessInterleaved(&I)) { 6972 auto Group = getInterleavedAccessGroup(&I); 6973 assert(Group && "Fail to get an interleaved access group."); 6974 6975 // Make one decision for the whole group. 6976 if (getWideningDecision(&I, VF) != CM_Unknown) 6977 continue; 6978 6979 NumAccesses = Group->getNumMembers(); 6980 if (interleavedAccessCanBeWidened(&I, VF)) 6981 InterleaveCost = getInterleaveGroupCost(&I, VF); 6982 } 6983 6984 InstructionCost GatherScatterCost = 6985 isLegalGatherOrScatter(&I, VF) 6986 ? getGatherScatterCost(&I, VF) * NumAccesses 6987 : InstructionCost::getInvalid(); 6988 6989 InstructionCost ScalarizationCost = 6990 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6991 6992 // Choose better solution for the current VF, 6993 // write down this decision and use it during vectorization. 
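      // Note the tie-breaking below: interleaving is preferred over
      // gather/scatter when their costs are equal, while both must be strictly
      // cheaper than scalarization to be chosen, so a tie with scalarization
      // falls back to scalarizing.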
6994 InstructionCost Cost; 6995 InstWidening Decision; 6996 if (InterleaveCost <= GatherScatterCost && 6997 InterleaveCost < ScalarizationCost) { 6998 Decision = CM_Interleave; 6999 Cost = InterleaveCost; 7000 } else if (GatherScatterCost < ScalarizationCost) { 7001 Decision = CM_GatherScatter; 7002 Cost = GatherScatterCost; 7003 } else { 7004 Decision = CM_Scalarize; 7005 Cost = ScalarizationCost; 7006 } 7007 // If the instructions belongs to an interleave group, the whole group 7008 // receives the same decision. The whole group receives the cost, but 7009 // the cost will actually be assigned to one instruction. 7010 if (auto Group = getInterleavedAccessGroup(&I)) 7011 setWideningDecision(Group, VF, Decision, Cost); 7012 else 7013 setWideningDecision(&I, VF, Decision, Cost); 7014 } 7015 } 7016 7017 // Make sure that any load of address and any other address computation 7018 // remains scalar unless there is gather/scatter support. This avoids 7019 // inevitable extracts into address registers, and also has the benefit of 7020 // activating LSR more, since that pass can't optimize vectorized 7021 // addresses. 7022 if (TTI.prefersVectorizedAddressing()) 7023 return; 7024 7025 // Start with all scalar pointer uses. 7026 SmallPtrSet<Instruction *, 8> AddrDefs; 7027 for (BasicBlock *BB : TheLoop->blocks()) 7028 for (Instruction &I : *BB) { 7029 Instruction *PtrDef = 7030 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7031 if (PtrDef && TheLoop->contains(PtrDef) && 7032 getWideningDecision(&I, VF) != CM_GatherScatter) 7033 AddrDefs.insert(PtrDef); 7034 } 7035 7036 // Add all instructions used to generate the addresses. 7037 SmallVector<Instruction *, 4> Worklist; 7038 append_range(Worklist, AddrDefs); 7039 while (!Worklist.empty()) { 7040 Instruction *I = Worklist.pop_back_val(); 7041 for (auto &Op : I->operands()) 7042 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7043 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7044 AddrDefs.insert(InstOp).second) 7045 Worklist.push_back(InstOp); 7046 } 7047 7048 for (auto *I : AddrDefs) { 7049 if (isa<LoadInst>(I)) { 7050 // Setting the desired widening decision should ideally be handled in 7051 // by cost functions, but since this involves the task of finding out 7052 // if the loaded register is involved in an address computation, it is 7053 // instead changed here when we know this is the case. 7054 InstWidening Decision = getWideningDecision(I, VF); 7055 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7056 // Scalarize a widened load of address. 7057 setWideningDecision( 7058 I, VF, CM_Scalarize, 7059 (VF.getKnownMinValue() * 7060 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7061 else if (auto Group = getInterleavedAccessGroup(I)) { 7062 // Scalarize an interleave group of address loads. 7063 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7064 if (Instruction *Member = Group->getMember(I)) 7065 setWideningDecision( 7066 Member, VF, CM_Scalarize, 7067 (VF.getKnownMinValue() * 7068 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7069 } 7070 } 7071 } else 7072 // Make sure I gets scalarized and a cost estimate without 7073 // scalarization overhead. 
7074 ForcedScalars[VF].insert(I); 7075 } 7076 } 7077 7078 InstructionCost 7079 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7080 Type *&VectorTy) { 7081 Type *RetTy = I->getType(); 7082 if (canTruncateToMinimalBitwidth(I, VF)) 7083 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7084 auto SE = PSE.getSE(); 7085 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7086 7087 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7088 ElementCount VF) -> bool { 7089 if (VF.isScalar()) 7090 return true; 7091 7092 auto Scalarized = InstsToScalarize.find(VF); 7093 assert(Scalarized != InstsToScalarize.end() && 7094 "VF not yet analyzed for scalarization profitability"); 7095 return !Scalarized->second.count(I) && 7096 llvm::all_of(I->users(), [&](User *U) { 7097 auto *UI = cast<Instruction>(U); 7098 return !Scalarized->second.count(UI); 7099 }); 7100 }; 7101 (void) hasSingleCopyAfterVectorization; 7102 7103 if (isScalarAfterVectorization(I, VF)) { 7104 // With the exception of GEPs and PHIs, after scalarization there should 7105 // only be one copy of the instruction generated in the loop. This is 7106 // because the VF is either 1, or any instructions that need scalarizing 7107 // have already been dealt with by the the time we get here. As a result, 7108 // it means we don't have to multiply the instruction cost by VF. 7109 assert(I->getOpcode() == Instruction::GetElementPtr || 7110 I->getOpcode() == Instruction::PHI || 7111 (I->getOpcode() == Instruction::BitCast && 7112 I->getType()->isPointerTy()) || 7113 hasSingleCopyAfterVectorization(I, VF)); 7114 VectorTy = RetTy; 7115 } else 7116 VectorTy = ToVectorTy(RetTy, VF); 7117 7118 // TODO: We need to estimate the cost of intrinsic calls. 7119 switch (I->getOpcode()) { 7120 case Instruction::GetElementPtr: 7121 // We mark this instruction as zero-cost because the cost of GEPs in 7122 // vectorized code depends on whether the corresponding memory instruction 7123 // is scalarized or not. Therefore, we handle GEPs with the memory 7124 // instruction cost. 7125 return 0; 7126 case Instruction::Br: { 7127 // In cases of scalarized and predicated instructions, there will be VF 7128 // predicated blocks in the vectorized loop. Each branch around these 7129 // blocks requires also an extract of its vector compare i1 element. 7130 bool ScalarPredicatedBB = false; 7131 BranchInst *BI = cast<BranchInst>(I); 7132 if (VF.isVector() && BI->isConditional() && 7133 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7134 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7135 ScalarPredicatedBB = true; 7136 7137 if (ScalarPredicatedBB) { 7138 // Not possible to scalarize scalable vector with predicated instructions. 7139 if (VF.isScalable()) 7140 return InstructionCost::getInvalid(); 7141 // Return cost for branches around scalarized and predicated blocks. 7142 auto *Vec_i1Ty = 7143 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7144 return ( 7145 TTI.getScalarizationOverhead( 7146 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7147 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7148 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7149 // The back-edge branch will remain, as will all scalar branches. 7150 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7151 else 7152 // This branch will be eliminated by if-conversion. 
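      // For example, the branch of a simple if-then-else whose blocks are
      // fully widened disappears after if-conversion: the incoming values are
      // blended with selects (or masked operations), so the branch itself
      // contributes no cost.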
7153 return 0; 7154 // Note: We currently assume zero cost for an unconditional branch inside 7155 // a predicated block since it will become a fall-through, although we 7156 // may decide in the future to call TTI for all branches. 7157 } 7158 case Instruction::PHI: { 7159 auto *Phi = cast<PHINode>(I); 7160 7161 // First-order recurrences are replaced by vector shuffles inside the loop. 7162 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7163 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7164 return TTI.getShuffleCost( 7165 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7166 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7167 7168 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7169 // converted into select instructions. We require N - 1 selects per phi 7170 // node, where N is the number of incoming values. 7171 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7172 return (Phi->getNumIncomingValues() - 1) * 7173 TTI.getCmpSelInstrCost( 7174 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7175 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7176 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7177 7178 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7179 } 7180 case Instruction::UDiv: 7181 case Instruction::SDiv: 7182 case Instruction::URem: 7183 case Instruction::SRem: 7184 // If we have a predicated instruction, it may not be executed for each 7185 // vector lane. Get the scalarization cost and scale this amount by the 7186 // probability of executing the predicated block. If the instruction is not 7187 // predicated, we fall through to the next case. 7188 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7189 InstructionCost Cost = 0; 7190 7191 // These instructions have a non-void type, so account for the phi nodes 7192 // that we will create. This cost is likely to be zero. The phi node 7193 // cost, if any, should be scaled by the block probability because it 7194 // models a copy at the end of each predicated block. 7195 Cost += VF.getKnownMinValue() * 7196 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7197 7198 // The cost of the non-predicated instruction. 7199 Cost += VF.getKnownMinValue() * 7200 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7201 7202 // The cost of insertelement and extractelement instructions needed for 7203 // scalarization. 7204 Cost += getScalarizationOverhead(I, VF); 7205 7206 // Scale the cost by the probability of executing the predicated blocks. 7207 // This assumes the predicated block for each vector lane is equally 7208 // likely. 7209 return Cost / getReciprocalPredBlockProb(); 7210 } 7211 LLVM_FALLTHROUGH; 7212 case Instruction::Add: 7213 case Instruction::FAdd: 7214 case Instruction::Sub: 7215 case Instruction::FSub: 7216 case Instruction::Mul: 7217 case Instruction::FMul: 7218 case Instruction::FDiv: 7219 case Instruction::FRem: 7220 case Instruction::Shl: 7221 case Instruction::LShr: 7222 case Instruction::AShr: 7223 case Instruction::And: 7224 case Instruction::Or: 7225 case Instruction::Xor: { 7226 // Since we will replace the stride by 1 the multiplication should go away. 
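    // For example, for an access like A[i * Stride] that is only vectorized
    // under a runtime check (or assumption) that Stride == 1, the multiply in
    // the address computation folds away once the symbolic stride is replaced,
    // so it is treated as free here.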
7227 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7228 return 0; 7229 7230 // Detect reduction patterns 7231 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7232 return *RedCost; 7233 7234 // Certain instructions can be cheaper to vectorize if they have a constant 7235 // second vector operand. One example of this are shifts on x86. 7236 Value *Op2 = I->getOperand(1); 7237 TargetTransformInfo::OperandValueProperties Op2VP; 7238 TargetTransformInfo::OperandValueKind Op2VK = 7239 TTI.getOperandInfo(Op2, Op2VP); 7240 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7241 Op2VK = TargetTransformInfo::OK_UniformValue; 7242 7243 SmallVector<const Value *, 4> Operands(I->operand_values()); 7244 return TTI.getArithmeticInstrCost( 7245 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7246 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7247 } 7248 case Instruction::FNeg: { 7249 return TTI.getArithmeticInstrCost( 7250 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7251 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7252 TargetTransformInfo::OP_None, I->getOperand(0), I); 7253 } 7254 case Instruction::Select: { 7255 SelectInst *SI = cast<SelectInst>(I); 7256 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7257 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7258 7259 const Value *Op0, *Op1; 7260 using namespace llvm::PatternMatch; 7261 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7262 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7263 // select x, y, false --> x & y 7264 // select x, true, y --> x | y 7265 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7266 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7267 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7268 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7269 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7270 Op1->getType()->getScalarSizeInBits() == 1); 7271 7272 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7273 return TTI.getArithmeticInstrCost( 7274 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7275 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7276 } 7277 7278 Type *CondTy = SI->getCondition()->getType(); 7279 if (!ScalarCond) 7280 CondTy = VectorType::get(CondTy, VF); 7281 7282 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7283 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7284 Pred = Cmp->getPredicate(); 7285 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7286 CostKind, I); 7287 } 7288 case Instruction::ICmp: 7289 case Instruction::FCmp: { 7290 Type *ValTy = I->getOperand(0)->getType(); 7291 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7292 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7293 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7294 VectorTy = ToVectorTy(ValTy, VF); 7295 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7296 cast<CmpInst>(I)->getPredicate(), CostKind, 7297 I); 7298 } 7299 case Instruction::Store: 7300 case Instruction::Load: { 7301 ElementCount Width = VF; 7302 if (Width.isVector()) { 7303 InstWidening Decision = getWideningDecision(I, Width); 7304 assert(Decision != CM_Unknown && 7305 "CM decision should be taken at this point"); 7306 if (Decision == CM_Scalarize) 7307 Width = ElementCount::getFixed(1); 7308 } 7309 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7310 return getMemoryInstructionCost(I, VF); 7311 } 7312 case Instruction::BitCast: 7313 if (I->getType()->isPointerTy()) 7314 return 0; 7315 LLVM_FALLTHROUGH; 7316 case Instruction::ZExt: 7317 case Instruction::SExt: 7318 case Instruction::FPToUI: 7319 case Instruction::FPToSI: 7320 case Instruction::FPExt: 7321 case Instruction::PtrToInt: 7322 case Instruction::IntToPtr: 7323 case Instruction::SIToFP: 7324 case Instruction::UIToFP: 7325 case Instruction::Trunc: 7326 case Instruction::FPTrunc: { 7327 // Computes the CastContextHint from a Load/Store instruction. 7328 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7329 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7330 "Expected a load or a store!"); 7331 7332 if (VF.isScalar() || !TheLoop->contains(I)) 7333 return TTI::CastContextHint::Normal; 7334 7335 switch (getWideningDecision(I, VF)) { 7336 case LoopVectorizationCostModel::CM_GatherScatter: 7337 return TTI::CastContextHint::GatherScatter; 7338 case LoopVectorizationCostModel::CM_Interleave: 7339 return TTI::CastContextHint::Interleave; 7340 case LoopVectorizationCostModel::CM_Scalarize: 7341 case LoopVectorizationCostModel::CM_Widen: 7342 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7343 : TTI::CastContextHint::Normal; 7344 case LoopVectorizationCostModel::CM_Widen_Reverse: 7345 return TTI::CastContextHint::Reversed; 7346 case LoopVectorizationCostModel::CM_Unknown: 7347 llvm_unreachable("Instr did not go through cost modelling?"); 7348 } 7349 7350 llvm_unreachable("Unhandled case!"); 7351 }; 7352 7353 unsigned Opcode = I->getOpcode(); 7354 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7355 // For Trunc, the context is the only user, which must be a StoreInst. 7356 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7357 if (I->hasOneUse()) 7358 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7359 CCH = ComputeCCH(Store); 7360 } 7361 // For Z/Sext, the context is the operand, which must be a LoadInst. 
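    // E.g. for "%e = zext i8 %v to i32" where %v is a widened load, the hint
    // records how that load is vectorized (normal, masked, reversed,
    // gather/scatter or interleaved), which can let the target price the
    // extension as part of an extending load.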
7362 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7363 Opcode == Instruction::FPExt) { 7364 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7365 CCH = ComputeCCH(Load); 7366 } 7367 7368 // We optimize the truncation of induction variables having constant 7369 // integer steps. The cost of these truncations is the same as the scalar 7370 // operation. 7371 if (isOptimizableIVTruncate(I, VF)) { 7372 auto *Trunc = cast<TruncInst>(I); 7373 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7374 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7375 } 7376 7377 // Detect reduction patterns 7378 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7379 return *RedCost; 7380 7381 Type *SrcScalarTy = I->getOperand(0)->getType(); 7382 Type *SrcVecTy = 7383 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7384 if (canTruncateToMinimalBitwidth(I, VF)) { 7385 // This cast is going to be shrunk. This may remove the cast or it might 7386 // turn it into slightly different cast. For example, if MinBW == 16, 7387 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7388 // 7389 // Calculate the modified src and dest types. 7390 Type *MinVecTy = VectorTy; 7391 if (Opcode == Instruction::Trunc) { 7392 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7393 VectorTy = 7394 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7395 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7396 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7397 VectorTy = 7398 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7399 } 7400 } 7401 7402 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7403 } 7404 case Instruction::Call: { 7405 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7406 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7407 return *RedCost; 7408 bool NeedToScalarize; 7409 CallInst *CI = cast<CallInst>(I); 7410 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7411 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7412 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7413 return std::min(CallCost, IntrinsicCost); 7414 } 7415 return CallCost; 7416 } 7417 case Instruction::ExtractValue: 7418 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7419 case Instruction::Alloca: 7420 // We cannot easily widen alloca to a scalable alloca, as 7421 // the result would need to be a vector of pointers. 7422 if (VF.isScalable()) 7423 return InstructionCost::getInvalid(); 7424 LLVM_FALLTHROUGH; 7425 default: 7426 // This opcode is unknown. Assume that it is the same as 'mul'. 7427 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7428 } // end of switch. 
7429 } 7430 7431 char LoopVectorize::ID = 0; 7432 7433 static const char lv_name[] = "Loop Vectorization"; 7434 7435 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7436 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7437 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7438 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7439 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7440 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7441 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7442 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7443 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7444 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7445 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7446 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7447 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7448 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7449 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7450 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7451 7452 namespace llvm { 7453 7454 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7455 7456 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7457 bool VectorizeOnlyWhenForced) { 7458 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7459 } 7460 7461 } // end namespace llvm 7462 7463 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7464 // Check if the pointer operand of a load or store instruction is 7465 // consecutive. 7466 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7467 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7468 return false; 7469 } 7470 7471 void LoopVectorizationCostModel::collectValuesToIgnore() { 7472 // Ignore ephemeral values. 7473 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7474 7475 // Ignore type-promoting instructions we identified during reduction 7476 // detection. 7477 for (auto &Reduction : Legal->getReductionVars()) { 7478 const RecurrenceDescriptor &RedDes = Reduction.second; 7479 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7480 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7481 } 7482 // Ignore type-casting instructions we identified during induction 7483 // detection. 7484 for (auto &Induction : Legal->getInductionVars()) { 7485 const InductionDescriptor &IndDes = Induction.second; 7486 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7487 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7488 } 7489 } 7490 7491 void LoopVectorizationCostModel::collectInLoopReductions() { 7492 for (auto &Reduction : Legal->getReductionVars()) { 7493 PHINode *Phi = Reduction.first; 7494 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7495 7496 // We don't collect reductions that are type promoted (yet). 7497 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7498 continue; 7499 7500 // If the target would prefer this reduction to happen "in-loop", then we 7501 // want to record it as such. 7502 unsigned Opcode = RdxDesc.getOpcode(); 7503 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7504 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7505 TargetTransformInfo::ReductionFlags())) 7506 continue; 7507 7508 // Check that we can correctly put the reductions into the loop, by 7509 // finding the chain of operations that leads from the phi to the loop 7510 // exit value. 
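    // For a plain integer add reduction such as
    //   %sum.next = add i32 %sum.phi, %x
    // the chain is just that single add. If no such chain can be found, the
    // reduction is left as an ordinary out-of-loop (widened) reduction.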
7511 SmallVector<Instruction *, 4> ReductionOperations = 7512 RdxDesc.getReductionOpChain(Phi, TheLoop); 7513 bool InLoop = !ReductionOperations.empty(); 7514 if (InLoop) { 7515 InLoopReductionChains[Phi] = ReductionOperations; 7516 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7517 Instruction *LastChain = Phi; 7518 for (auto *I : ReductionOperations) { 7519 InLoopReductionImmediateChains[I] = LastChain; 7520 LastChain = I; 7521 } 7522 } 7523 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7524 << " reduction for phi: " << *Phi << "\n"); 7525 } 7526 } 7527 7528 // TODO: we could return a pair of values that specify the max VF and 7529 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7530 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7531 // doesn't have a cost model that can choose which plan to execute if 7532 // more than one is generated. 7533 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7534 LoopVectorizationCostModel &CM) { 7535 unsigned WidestType; 7536 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7537 return WidestVectorRegBits / WidestType; 7538 } 7539 7540 VectorizationFactor 7541 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7542 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7543 ElementCount VF = UserVF; 7544 // Outer loop handling: They may require CFG and instruction level 7545 // transformations before even evaluating whether vectorization is profitable. 7546 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7547 // the vectorization pipeline. 7548 if (!OrigLoop->isInnermost()) { 7549 // If the user doesn't provide a vectorization factor, determine a 7550 // reasonable one. 7551 if (UserVF.isZero()) { 7552 VF = ElementCount::getFixed(determineVPlanVF( 7553 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7554 .getFixedSize(), 7555 CM)); 7556 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7557 7558 // Make sure we have a VF > 1 for stress testing. 7559 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7560 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7561 << "overriding computed VF.\n"); 7562 VF = ElementCount::getFixed(4); 7563 } 7564 } 7565 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7566 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7567 "VF needs to be a power of two"); 7568 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7569 << "VF " << VF << " to build VPlans.\n"); 7570 buildVPlans(VF, VF); 7571 7572 // For VPlan build stress testing, we bail out after VPlan construction. 7573 if (VPlanBuildStressTest) 7574 return VectorizationFactor::Disabled(); 7575 7576 return {VF, 0 /*Cost*/}; 7577 } 7578 7579 LLVM_DEBUG( 7580 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7581 "VPlan-native path.\n"); 7582 return VectorizationFactor::Disabled(); 7583 } 7584 7585 Optional<VectorizationFactor> 7586 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7587 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7588 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7589 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7590 return None; 7591 7592 // Invalidate interleave groups if all blocks of loop will be predicated. 
7593 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7594 !useMaskedInterleavedAccesses(*TTI)) { 7595 LLVM_DEBUG( 7596 dbgs() 7597 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7598 "which requires masked-interleaved support.\n"); 7599 if (CM.InterleaveInfo.invalidateGroups()) 7600 // Invalidating interleave groups also requires invalidating all decisions 7601 // based on them, which includes widening decisions and uniform and scalar 7602 // values. 7603 CM.invalidateCostModelingDecisions(); 7604 } 7605 7606 ElementCount MaxUserVF = 7607 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7608 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7609 if (!UserVF.isZero() && UserVFIsLegal) { 7610 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7611 "VF needs to be a power of two"); 7612 // Collect the instructions (and their associated costs) that will be more 7613 // profitable to scalarize. 7614 if (CM.selectUserVectorizationFactor(UserVF)) { 7615 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7616 CM.collectInLoopReductions(); 7617 buildVPlansWithVPRecipes(UserVF, UserVF); 7618 LLVM_DEBUG(printPlans(dbgs())); 7619 return {{UserVF, 0}}; 7620 } else 7621 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7622 "InvalidCost", ORE, OrigLoop); 7623 } 7624 7625 // Populate the set of Vectorization Factor Candidates. 7626 ElementCountSet VFCandidates; 7627 for (auto VF = ElementCount::getFixed(1); 7628 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7629 VFCandidates.insert(VF); 7630 for (auto VF = ElementCount::getScalable(1); 7631 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7632 VFCandidates.insert(VF); 7633 7634 for (const auto &VF : VFCandidates) { 7635 // Collect Uniform and Scalar instructions after vectorization with VF. 7636 CM.collectUniformsAndScalars(VF); 7637 7638 // Collect the instructions (and their associated costs) that will be more 7639 // profitable to scalarize. 7640 if (VF.isVector()) 7641 CM.collectInstsToScalarize(VF); 7642 } 7643 7644 CM.collectInLoopReductions(); 7645 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7646 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7647 7648 LLVM_DEBUG(printPlans(dbgs())); 7649 if (!MaxFactors.hasVector()) 7650 return VectorizationFactor::Disabled(); 7651 7652 // Select the optimal vectorization factor. 7653 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7654 7655 // Check if it is profitable to vectorize with runtime checks. 
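  // The number of runtime memory checks can grow roughly quadratically with
  // the number of pointer groups being checked, so past a threshold the
  // overhead of the checks can outweigh the benefit of vectorizing, unless the
  // user has explicitly allowed memory-operation reordering.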
7656 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7657 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7658 bool PragmaThresholdReached = 7659 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7660 bool ThresholdReached = 7661 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7662 if ((ThresholdReached && !Hints.allowReordering()) || 7663 PragmaThresholdReached) { 7664 ORE->emit([&]() { 7665 return OptimizationRemarkAnalysisAliasing( 7666 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7667 OrigLoop->getHeader()) 7668 << "loop not vectorized: cannot prove it is safe to reorder " 7669 "memory operations"; 7670 }); 7671 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7672 Hints.emitRemarkWithHints(); 7673 return VectorizationFactor::Disabled(); 7674 } 7675 } 7676 return SelectedVF; 7677 } 7678 7679 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7680 assert(count_if(VPlans, 7681 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7682 1 && 7683 "Best VF has not a single VPlan."); 7684 7685 for (const VPlanPtr &Plan : VPlans) { 7686 if (Plan->hasVF(VF)) 7687 return *Plan.get(); 7688 } 7689 llvm_unreachable("No plan found!"); 7690 } 7691 7692 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7693 SmallVector<Metadata *, 4> MDs; 7694 // Reserve first location for self reference to the LoopID metadata node. 7695 MDs.push_back(nullptr); 7696 bool IsUnrollMetadata = false; 7697 MDNode *LoopID = L->getLoopID(); 7698 if (LoopID) { 7699 // First find existing loop unrolling disable metadata. 7700 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7701 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7702 if (MD) { 7703 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7704 IsUnrollMetadata = 7705 S && S->getString().startswith("llvm.loop.unroll.disable"); 7706 } 7707 MDs.push_back(LoopID->getOperand(i)); 7708 } 7709 } 7710 7711 if (!IsUnrollMetadata) { 7712 // Add runtime unroll disable metadata. 7713 LLVMContext &Context = L->getHeader()->getContext(); 7714 SmallVector<Metadata *, 1> DisableOperands; 7715 DisableOperands.push_back( 7716 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7717 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7718 MDs.push_back(DisableNode); 7719 MDNode *NewLoopID = MDNode::get(Context, MDs); 7720 // Set operand 0 to refer to the loop id itself. 7721 NewLoopID->replaceOperandWith(0, NewLoopID); 7722 L->setLoopID(NewLoopID); 7723 } 7724 } 7725 7726 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7727 VPlan &BestVPlan, 7728 InnerLoopVectorizer &ILV, 7729 DominatorTree *DT) { 7730 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7731 << '\n'); 7732 7733 // Perform the actual loop transformation. 7734 7735 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7736 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7737 Value *CanonicalIVStartValue; 7738 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) = 7739 ILV.createVectorizedLoopSkeleton(); 7740 ILV.collectPoisonGeneratingRecipes(State); 7741 7742 ILV.printDebugTracesAtStart(); 7743 7744 //===------------------------------------------------===// 7745 // 7746 // Notice: any optimization or new instruction that go 7747 // into the code below should also be implemented in 7748 // the cost-model. 
7749 // 7750 //===------------------------------------------------===// 7751 7752 // 2. Copy and widen instructions from the old loop into the new loop. 7753 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), 7754 ILV.getOrCreateVectorTripCount(nullptr), 7755 CanonicalIVStartValue, State); 7756 BestVPlan.execute(&State); 7757 7758 // Keep all loop hints from the original loop on the vector loop (we'll 7759 // replace the vectorizer-specific hints below). 7760 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7761 7762 Optional<MDNode *> VectorizedLoopID = 7763 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7764 LLVMLoopVectorizeFollowupVectorized}); 7765 7766 Loop *L = LI->getLoopFor(State.CFG.PrevBB); 7767 if (VectorizedLoopID.hasValue()) 7768 L->setLoopID(VectorizedLoopID.getValue()); 7769 else { 7770 // Keep all loop hints from the original loop on the vector loop (we'll 7771 // replace the vectorizer-specific hints below). 7772 if (MDNode *LID = OrigLoop->getLoopID()) 7773 L->setLoopID(LID); 7774 7775 LoopVectorizeHints Hints(L, true, *ORE); 7776 Hints.setAlreadyVectorized(); 7777 } 7778 // Disable runtime unrolling when vectorizing the epilogue loop. 7779 if (CanonicalIVStartValue) 7780 AddRuntimeUnrollDisableMetaData(L); 7781 7782 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7783 // predication, updating analyses. 7784 ILV.fixVectorizedLoop(State); 7785 7786 ILV.printDebugTracesAtEnd(); 7787 } 7788 7789 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 7790 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 7791 for (const auto &Plan : VPlans) 7792 if (PrintVPlansInDotFormat) 7793 Plan->printDOT(O); 7794 else 7795 Plan->print(O); 7796 } 7797 #endif 7798 7799 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7800 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7801 7802 // We create new control-flow for the vectorized loop, so the original exit 7803 // conditions will be dead after vectorization if it's only used by the 7804 // terminator 7805 SmallVector<BasicBlock*> ExitingBlocks; 7806 OrigLoop->getExitingBlocks(ExitingBlocks); 7807 for (auto *BB : ExitingBlocks) { 7808 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7809 if (!Cmp || !Cmp->hasOneUse()) 7810 continue; 7811 7812 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7813 if (!DeadInstructions.insert(Cmp).second) 7814 continue; 7815 7816 // The operands of the icmp is often a dead trunc, used by IndUpdate. 7817 // TODO: can recurse through operands in general 7818 for (Value *Op : Cmp->operands()) { 7819 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7820 DeadInstructions.insert(cast<Instruction>(Op)); 7821 } 7822 } 7823 7824 // We create new "steps" for induction variable updates to which the original 7825 // induction variables map. An original update instruction will be dead if 7826 // all its users except the induction variable are dead. 7827 auto *Latch = OrigLoop->getLoopLatch(); 7828 for (auto &Induction : Legal->getInductionVars()) { 7829 PHINode *Ind = Induction.first; 7830 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7831 7832 // If the tail is to be folded by masking, the primary induction variable, 7833 // if exists, isn't dead: it will be used for masking. Don't kill it. 
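    // (The header-block mask is formed by comparing a widened canonical
    // induction against the backedge-taken count; see
    // VPRecipeBuilder::createBlockInMask below.)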
7834 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7835 continue; 7836 7837 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7838 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7839 })) 7840 DeadInstructions.insert(IndUpdate); 7841 } 7842 } 7843 7844 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7845 7846 //===--------------------------------------------------------------------===// 7847 // EpilogueVectorizerMainLoop 7848 //===--------------------------------------------------------------------===// 7849 7850 /// This function is partially responsible for generating the control flow 7851 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7852 std::pair<BasicBlock *, Value *> 7853 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7854 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7855 Loop *Lp = createVectorLoopSkeleton(""); 7856 7857 // Generate the code to check the minimum iteration count of the vector 7858 // epilogue (see below). 7859 EPI.EpilogueIterationCountCheck = 7860 emitMinimumIterationCountCheck(LoopScalarPreHeader, true); 7861 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7862 7863 // Generate the code to check any assumptions that we've made for SCEV 7864 // expressions. 7865 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7866 7867 // Generate the code that checks at runtime if arrays overlap. We put the 7868 // checks into a separate block to make the more common case of few elements 7869 // faster. 7870 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7871 7872 // Generate the iteration count check for the main loop, *after* the check 7873 // for the epilogue loop, so that the path-length is shorter for the case 7874 // that goes directly through the vector epilogue. The longer-path length for 7875 // the main loop is compensated for, by the gain from vectorizing the larger 7876 // trip count. Note: the branch will get updated later on when we vectorize 7877 // the epilogue. 7878 EPI.MainLoopIterationCountCheck = 7879 emitMinimumIterationCountCheck(LoopScalarPreHeader, false); 7880 7881 // Generate the induction variable. 7882 Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader); 7883 EPI.VectorTripCount = CountRoundDown; 7884 createHeaderBranch(Lp); 7885 7886 // Skip induction resume value creation here because they will be created in 7887 // the second pass. If we created them here, they wouldn't be used anyway, 7888 // because the vplan in the second pass still contains the inductions from the 7889 // original loop. 7890 7891 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7892 } 7893 7894 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7895 LLVM_DEBUG({ 7896 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7897 << "Main Loop VF:" << EPI.MainLoopVF 7898 << ", Main Loop UF:" << EPI.MainLoopUF 7899 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7900 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7901 }); 7902 } 7903 7904 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7905 DEBUG_WITH_TYPE(VerboseDebug, { 7906 dbgs() << "intermediate fn:\n" 7907 << *OrigLoop->getHeader()->getParent() << "\n"; 7908 }); 7909 } 7910 7911 BasicBlock * 7912 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass, 7913 bool ForEpilogue) { 7914 assert(Bypass && "Expected valid bypass basic block."); 7915 ElementCount VFactor = ForEpilogue ? 
EPI.EpilogueVF : VF; 7916 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7917 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7918 // Reuse existing vector loop preheader for TC checks. 7919 // Note that new preheader block is generated for vector loop. 7920 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7921 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7922 7923 // Generate code to check if the loop's trip count is less than VF * UF of the 7924 // main vector loop. 7925 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7926 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7927 7928 Value *CheckMinIters = Builder.CreateICmp( 7929 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7930 "min.iters.check"); 7931 7932 if (!ForEpilogue) 7933 TCCheckBlock->setName("vector.main.loop.iter.check"); 7934 7935 // Create new preheader for vector loop. 7936 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7937 DT, LI, nullptr, "vector.ph"); 7938 7939 if (ForEpilogue) { 7940 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7941 DT->getNode(Bypass)->getIDom()) && 7942 "TC check is expected to dominate Bypass"); 7943 7944 // Update dominator for Bypass & LoopExit. 7945 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7946 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7947 // For loops with multiple exits, there's no edge from the middle block 7948 // to exit blocks (as the epilogue must run) and thus no need to update 7949 // the immediate dominator of the exit blocks. 7950 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7951 7952 LoopBypassBlocks.push_back(TCCheckBlock); 7953 7954 // Save the trip count so we don't have to regenerate it in the 7955 // vec.epilog.iter.check. This is safe to do because the trip count 7956 // generated here dominates the vector epilog iter check. 7957 EPI.TripCount = Count; 7958 } 7959 7960 ReplaceInstWithInst( 7961 TCCheckBlock->getTerminator(), 7962 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7963 7964 return TCCheckBlock; 7965 } 7966 7967 //===--------------------------------------------------------------------===// 7968 // EpilogueVectorizerEpilogueLoop 7969 //===--------------------------------------------------------------------===// 7970 7971 /// This function is partially responsible for generating the control flow 7972 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7973 std::pair<BasicBlock *, Value *> 7974 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7975 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7976 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 7977 7978 // Now, compare the remaining count and if there aren't enough iterations to 7979 // execute the vectorized epilogue skip to the scalar part. 7980 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7981 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7982 LoopVectorPreHeader = 7983 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7984 LI, nullptr, "vec.epilog.ph"); 7985 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7986 VecEpilogueIterationCountCheck); 7987 7988 // Adjust the control flow taking the state info from the main loop 7989 // vectorization into account. 
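  // Below, the main loop's minimum-iteration-count check is redirected to fall
  // through directly to the new epilogue preheader (the epilogue count check
  // at the top has already been taken at that point), while the first-pass
  // epilogue iteration-count check and the SCEV/memory runtime checks are
  // redirected to the scalar preheader.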
7990 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7991 "expected this to be saved from the previous pass."); 7992 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7993 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7994 7995 DT->changeImmediateDominator(LoopVectorPreHeader, 7996 EPI.MainLoopIterationCountCheck); 7997 7998 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7999 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8000 8001 if (EPI.SCEVSafetyCheck) 8002 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8003 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8004 if (EPI.MemSafetyCheck) 8005 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8006 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8007 8008 DT->changeImmediateDominator( 8009 VecEpilogueIterationCountCheck, 8010 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8011 8012 DT->changeImmediateDominator(LoopScalarPreHeader, 8013 EPI.EpilogueIterationCountCheck); 8014 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8015 // If there is an epilogue which must run, there's no edge from the 8016 // middle block to exit blocks and thus no need to update the immediate 8017 // dominator of the exit blocks. 8018 DT->changeImmediateDominator(LoopExitBlock, 8019 EPI.EpilogueIterationCountCheck); 8020 8021 // Keep track of bypass blocks, as they feed start values to the induction 8022 // phis in the scalar loop preheader. 8023 if (EPI.SCEVSafetyCheck) 8024 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8025 if (EPI.MemSafetyCheck) 8026 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8027 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8028 8029 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 8030 // merge control-flow from the latch block and the middle block. Update the 8031 // incoming values here and move the Phi into the preheader. 8032 SmallVector<PHINode *, 4> PhisInBlock; 8033 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis()) 8034 PhisInBlock.push_back(&Phi); 8035 8036 for (PHINode *Phi : PhisInBlock) { 8037 Phi->replaceIncomingBlockWith( 8038 VecEpilogueIterationCountCheck->getSinglePredecessor(), 8039 VecEpilogueIterationCountCheck); 8040 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); 8041 if (EPI.SCEVSafetyCheck) 8042 Phi->removeIncomingValue(EPI.SCEVSafetyCheck); 8043 if (EPI.MemSafetyCheck) 8044 Phi->removeIncomingValue(EPI.MemSafetyCheck); 8045 Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI()); 8046 } 8047 8048 // Generate a resume induction for the vector epilogue and put it in the 8049 // vector epilogue preheader 8050 Type *IdxTy = Legal->getWidestInductionType(); 8051 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8052 LoopVectorPreHeader->getFirstNonPHI()); 8053 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8054 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8055 EPI.MainLoopIterationCountCheck); 8056 8057 // Generate the induction variable. 8058 createHeaderBranch(Lp); 8059 8060 // Generate induction resume values. These variables save the new starting 8061 // indexes for the scalar loop. They are used to test if there are any tail 8062 // iterations left once the vector loop has completed. 
8063 // Note that when the vectorized epilogue is skipped due to iteration count 8064 // check, then the resume value for the induction variable comes from 8065 // the trip count of the main vector loop, hence passing the AdditionalBypass 8066 // argument. 8067 createInductionResumeValues({VecEpilogueIterationCountCheck, 8068 EPI.VectorTripCount} /* AdditionalBypass */); 8069 8070 return {completeLoopSkeleton(OrigLoopID), EPResumeVal}; 8071 } 8072 8073 BasicBlock * 8074 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( 8075 BasicBlock *Bypass, BasicBlock *Insert) { 8076 8077 assert(EPI.TripCount && 8078 "Expected trip count to have been safed in the first pass."); 8079 assert( 8080 (!isa<Instruction>(EPI.TripCount) || 8081 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && 8082 "saved trip count does not dominate insertion point."); 8083 Value *TC = EPI.TripCount; 8084 IRBuilder<> Builder(Insert->getTerminator()); 8085 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); 8086 8087 // Generate code to check if the loop's trip count is less than VF * UF of the 8088 // vector epilogue loop. 8089 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ? 8090 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8091 8092 Value *CheckMinIters = 8093 Builder.CreateICmp(P, Count, 8094 createStepForVF(Builder, Count->getType(), 8095 EPI.EpilogueVF, EPI.EpilogueUF), 8096 "min.epilog.iters.check"); 8097 8098 ReplaceInstWithInst( 8099 Insert->getTerminator(), 8100 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8101 8102 LoopBypassBlocks.push_back(Insert); 8103 return Insert; 8104 } 8105 8106 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8107 LLVM_DEBUG({ 8108 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8109 << "Epilogue Loop VF:" << EPI.EpilogueVF 8110 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8111 }); 8112 } 8113 8114 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8115 DEBUG_WITH_TYPE(VerboseDebug, { 8116 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 8117 }); 8118 } 8119 8120 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8121 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8122 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8123 bool PredicateAtRangeStart = Predicate(Range.Start); 8124 8125 for (ElementCount TmpVF = Range.Start * 2; 8126 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8127 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8128 Range.End = TmpVF; 8129 break; 8130 } 8131 8132 return PredicateAtRangeStart; 8133 } 8134 8135 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8136 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8137 /// of VF's starting at a given VF and extending it as much as possible. Each 8138 /// vectorization decision can potentially shorten this sub-range during 8139 /// buildVPlan(). 
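/// For example, with MinVF = 1 and MaxVF = 8 this can produce three plans
/// covering the VF sets {1}, {2, 4} and {8}, if some per-VF decision changes
/// between VF = 1 and VF = 2 and again between VF = 4 and VF = 8.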
8140 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8141 ElementCount MaxVF) { 8142 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8143 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8144 VFRange SubRange = {VF, MaxVFPlusOne}; 8145 VPlans.push_back(buildVPlan(SubRange)); 8146 VF = SubRange.End; 8147 } 8148 } 8149 8150 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8151 VPlanPtr &Plan) { 8152 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8153 8154 // Look for cached value. 8155 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8156 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8157 if (ECEntryIt != EdgeMaskCache.end()) 8158 return ECEntryIt->second; 8159 8160 VPValue *SrcMask = createBlockInMask(Src, Plan); 8161 8162 // The terminator has to be a branch inst! 8163 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8164 assert(BI && "Unexpected terminator found"); 8165 8166 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8167 return EdgeMaskCache[Edge] = SrcMask; 8168 8169 // If source is an exiting block, we know the exit edge is dynamically dead 8170 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8171 // adding uses of an otherwise potentially dead instruction. 8172 if (OrigLoop->isLoopExiting(Src)) 8173 return EdgeMaskCache[Edge] = SrcMask; 8174 8175 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8176 assert(EdgeMask && "No Edge Mask found for condition"); 8177 8178 if (BI->getSuccessor(0) != Dst) 8179 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8180 8181 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8182 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8183 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8184 // The select version does not introduce new UB if SrcMask is false and 8185 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8186 VPValue *False = Plan->getOrAddVPValue( 8187 ConstantInt::getFalse(BI->getCondition()->getType())); 8188 EdgeMask = 8189 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8190 } 8191 8192 return EdgeMaskCache[Edge] = EdgeMask; 8193 } 8194 8195 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8196 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8197 8198 // Look for cached value. 8199 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8200 if (BCEntryIt != BlockMaskCache.end()) 8201 return BCEntryIt->second; 8202 8203 // All-one mask is modelled as no-mask following the convention for masked 8204 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8205 VPValue *BlockMask = nullptr; 8206 8207 if (OrigLoop->getHeader() == BB) { 8208 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8209 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8210 8211 // Introduce the early-exit compare IV <= BTC to form header block mask. 8212 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8213 // constructing the desired canonical IV in the header block as its first 8214 // non-phi instructions. 
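    // For example, with a trip count of 10 (BTC = 9) and VF = 4, the last
    // vector iteration has lane IV values <8, 9, 10, 11>; the ULE compare
    // against the splatted BTC yields <1, 1, 0, 0>, masking off the two
    // out-of-range lanes.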
8215 assert(CM.foldTailByMasking() && "must fold the tail"); 8216 VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock(); 8217 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8218 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8219 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8220 8221 VPBuilder::InsertPointGuard Guard(Builder); 8222 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8223 if (CM.TTI.emitGetActiveLaneMask()) { 8224 VPValue *TC = Plan->getOrCreateTripCount(); 8225 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8226 } else { 8227 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8228 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8229 } 8230 return BlockMaskCache[BB] = BlockMask; 8231 } 8232 8233 // This is the block mask. We OR all incoming edges. 8234 for (auto *Predecessor : predecessors(BB)) { 8235 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8236 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8237 return BlockMaskCache[BB] = EdgeMask; 8238 8239 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8240 BlockMask = EdgeMask; 8241 continue; 8242 } 8243 8244 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8245 } 8246 8247 return BlockMaskCache[BB] = BlockMask; 8248 } 8249 8250 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8251 ArrayRef<VPValue *> Operands, 8252 VFRange &Range, 8253 VPlanPtr &Plan) { 8254 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8255 "Must be called with either a load or store"); 8256 8257 auto willWiden = [&](ElementCount VF) -> bool { 8258 if (VF.isScalar()) 8259 return false; 8260 LoopVectorizationCostModel::InstWidening Decision = 8261 CM.getWideningDecision(I, VF); 8262 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8263 "CM decision should be taken at this point."); 8264 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8265 return true; 8266 if (CM.isScalarAfterVectorization(I, VF) || 8267 CM.isProfitableToScalarize(I, VF)) 8268 return false; 8269 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8270 }; 8271 8272 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8273 return nullptr; 8274 8275 VPValue *Mask = nullptr; 8276 if (Legal->isMaskRequired(I)) 8277 Mask = createBlockInMask(I->getParent(), Plan); 8278 8279 // Determine if the pointer operand of the access is either consecutive or 8280 // reverse consecutive. 8281 LoopVectorizationCostModel::InstWidening Decision = 8282 CM.getWideningDecision(I, Range.Start); 8283 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8284 bool Consecutive = 8285 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8286 8287 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8288 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8289 Consecutive, Reverse); 8290 8291 StoreInst *Store = cast<StoreInst>(I); 8292 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8293 Mask, Consecutive, Reverse); 8294 } 8295 8296 static VPWidenIntOrFpInductionRecipe * 8297 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8298 VPValue *Start, const InductionDescriptor &IndDesc, 8299 LoopVectorizationCostModel &CM, ScalarEvolution &SE, 8300 Loop &OrigLoop, VFRange &Range) { 8301 // Returns true if an instruction \p I should be scalarized instead of 8302 // vectorized for the chosen vectorization factor. 
8303 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8304 return CM.isScalarAfterVectorization(I, VF) || 8305 CM.isProfitableToScalarize(I, VF); 8306 }; 8307 8308 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8309 [&](ElementCount VF) { 8310 // Returns true if we should generate a scalar version of \p IV. 8311 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8312 return true; 8313 auto isScalarInst = [&](User *U) -> bool { 8314 auto *I = cast<Instruction>(U); 8315 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8316 }; 8317 return any_of(PhiOrTrunc->users(), isScalarInst); 8318 }, 8319 Range); 8320 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8321 [&](ElementCount VF) { 8322 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8323 }, 8324 Range); 8325 assert(IndDesc.getStartValue() == 8326 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8327 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8328 "step must be loop invariant"); 8329 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8330 return new VPWidenIntOrFpInductionRecipe( 8331 Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE); 8332 } 8333 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8334 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8335 !NeedsScalarIVOnly, SE); 8336 } 8337 8338 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI( 8339 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8340 8341 // Check if this is an integer or fp induction. If so, build the recipe that 8342 // produces its scalar and vector values. 8343 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8344 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, 8345 *PSE.getSE(), *OrigLoop, Range); 8346 8347 return nullptr; 8348 } 8349 8350 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8351 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8352 VPlan &Plan) const { 8353 // Optimize the special case where the source is a constant integer 8354 // induction variable. Notice that we can only optimize the 'trunc' case 8355 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8356 // (c) other casts depend on pointer size. 8357 8358 // Determine whether \p K is a truncation based on an induction variable that 8359 // can be optimized. 8360 auto isOptimizableIVTruncate = 8361 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8362 return [=](ElementCount VF) -> bool { 8363 return CM.isOptimizableIVTruncate(K, VF); 8364 }; 8365 }; 8366 8367 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8368 isOptimizableIVTruncate(I), Range)) { 8369 8370 auto *Phi = cast<PHINode>(I->getOperand(0)); 8371 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8372 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8373 return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(), 8374 *OrigLoop, Range); 8375 } 8376 return nullptr; 8377 } 8378 8379 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8380 ArrayRef<VPValue *> Operands, 8381 VPlanPtr &Plan) { 8382 // If all incoming values are equal, the incoming VPValue can be used directly 8383 // instead of creating a new VPBlendRecipe. 
8384 VPValue *FirstIncoming = Operands[0]; 8385 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8386 return FirstIncoming == Inc; 8387 })) { 8388 return Operands[0]; 8389 } 8390 8391 unsigned NumIncoming = Phi->getNumIncomingValues(); 8392 // For in-loop reductions, we do not need to create an additional select. 8393 VPValue *InLoopVal = nullptr; 8394 for (unsigned In = 0; In < NumIncoming; In++) { 8395 PHINode *PhiOp = 8396 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8397 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8398 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8399 InLoopVal = Operands[In]; 8400 } 8401 } 8402 8403 assert((!InLoopVal || NumIncoming == 2) && 8404 "Found an in-loop reduction for PHI with unexpected number of " 8405 "incoming values"); 8406 if (InLoopVal) 8407 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8408 8409 // We know that all PHIs in non-header blocks are converted into selects, so 8410 // we don't have to worry about the insertion order and we can just use the 8411 // builder. At this point we generate the predication tree. There may be 8412 // duplications since this is a simple recursive scan, but future 8413 // optimizations will clean it up. 8414 SmallVector<VPValue *, 2> OperandsWithMask; 8415 8416 for (unsigned In = 0; In < NumIncoming; In++) { 8417 VPValue *EdgeMask = 8418 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8419 assert((EdgeMask || NumIncoming == 1) && 8420 "Multiple predecessors with one having a full mask"); 8421 OperandsWithMask.push_back(Operands[In]); 8422 if (EdgeMask) 8423 OperandsWithMask.push_back(EdgeMask); 8424 } 8425 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8426 } 8427 8428 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8429 ArrayRef<VPValue *> Operands, 8430 VFRange &Range) const { 8431 8432 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8433 [this, CI](ElementCount VF) { 8434 return CM.isScalarWithPredication(CI, VF); 8435 }, 8436 Range); 8437 8438 if (IsPredicated) 8439 return nullptr; 8440 8441 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8442 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8443 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8444 ID == Intrinsic::pseudoprobe || 8445 ID == Intrinsic::experimental_noalias_scope_decl)) 8446 return nullptr; 8447 8448 auto willWiden = [&](ElementCount VF) -> bool { 8449 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8450 // The following case may be scalarized depending on the VF. 8451 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8452 // version of the instruction. 8453 // Is it beneficial to perform intrinsic call compared to lib call? 8454 bool NeedToScalarize = false; 8455 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8456 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8457 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8458 return UseVectorIntrinsic || !NeedToScalarize; 8459 }; 8460 8461 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8462 return nullptr; 8463 8464 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8465 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8466 } 8467 8468 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8469 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8470 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8471 // Instruction should be widened, unless it is scalar after vectorization, 8472 // scalarization is profitable or it is predicated. 8473 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8474 return CM.isScalarAfterVectorization(I, VF) || 8475 CM.isProfitableToScalarize(I, VF) || 8476 CM.isScalarWithPredication(I, VF); 8477 }; 8478 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8479 Range); 8480 } 8481 8482 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8483 ArrayRef<VPValue *> Operands) const { 8484 auto IsVectorizableOpcode = [](unsigned Opcode) { 8485 switch (Opcode) { 8486 case Instruction::Add: 8487 case Instruction::And: 8488 case Instruction::AShr: 8489 case Instruction::BitCast: 8490 case Instruction::FAdd: 8491 case Instruction::FCmp: 8492 case Instruction::FDiv: 8493 case Instruction::FMul: 8494 case Instruction::FNeg: 8495 case Instruction::FPExt: 8496 case Instruction::FPToSI: 8497 case Instruction::FPToUI: 8498 case Instruction::FPTrunc: 8499 case Instruction::FRem: 8500 case Instruction::FSub: 8501 case Instruction::ICmp: 8502 case Instruction::IntToPtr: 8503 case Instruction::LShr: 8504 case Instruction::Mul: 8505 case Instruction::Or: 8506 case Instruction::PtrToInt: 8507 case Instruction::SDiv: 8508 case Instruction::Select: 8509 case Instruction::SExt: 8510 case Instruction::Shl: 8511 case Instruction::SIToFP: 8512 case Instruction::SRem: 8513 case Instruction::Sub: 8514 case Instruction::Trunc: 8515 case Instruction::UDiv: 8516 case Instruction::UIToFP: 8517 case Instruction::URem: 8518 case Instruction::Xor: 8519 case Instruction::ZExt: 8520 return true; 8521 } 8522 return false; 8523 }; 8524 8525 if (!IsVectorizableOpcode(I->getOpcode())) 8526 return nullptr; 8527 8528 // Success: widen this instruction. 
  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
}

void VPRecipeBuilder::fixHeaderPhis() {
  BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  for (VPHeaderPHIRecipe *R : PhisToFix) {
    auto *PN = cast<PHINode>(R->getUnderlyingValue());
    VPRecipeBase *IncR =
        getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
    R->addOperand(IncR->getVPSingleValue());
  }
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
      Range);

  // Even if the instruction is not marked as uniform, there are certain
  // intrinsic calls that can be effectively treated as such, so we check for
  // them here. Conservatively, we only do this for scalable vectors, since
  // for fixed-width VFs we can always fall back on full scalarization.
  if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
    switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
    case Intrinsic::assume:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // For scalable vectors if one of the operands is variant then we still
      // want to mark as uniform, which will generate one instruction for just
      // the first lane of the vector. We can't scalarize the call in the same
      // way as for fixed-width vectors because we don't know how many lanes
      // there are.
      //
      // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic generating the instruction for the first
      //      lane is still better than not generating any at all. For
      //      example, the input may be a splat across all lanes.
      //   2. For the lifetime start/end intrinsics the pointer operand only
      //      does anything useful when the input comes from a stack object,
      //      which suggests it should always be uniform. For non-stack objects
      //      the effect is to poison the object, which still allows us to
      //      remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
8604 if (!IsPredicated) { 8605 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8606 VPBB->appendRecipe(Recipe); 8607 return VPBB; 8608 } 8609 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8610 8611 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8612 assert(SingleSucc && "VPBB must have a single successor when handling " 8613 "predicated replication."); 8614 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8615 // Record predicated instructions for above packing optimizations. 8616 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8617 VPBlockUtils::insertBlockAfter(Region, VPBB); 8618 auto *RegSucc = new VPBasicBlock(); 8619 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8620 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8621 return RegSucc; 8622 } 8623 8624 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8625 VPRecipeBase *PredRecipe, 8626 VPlanPtr &Plan) { 8627 // Instructions marked for predication are replicated and placed under an 8628 // if-then construct to prevent side-effects. 8629 8630 // Generate recipes to compute the block mask for this region. 8631 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8632 8633 // Build the triangular if-then region. 8634 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8635 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8636 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8637 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8638 auto *PHIRecipe = Instr->getType()->isVoidTy() 8639 ? nullptr 8640 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8641 if (PHIRecipe) { 8642 Plan->removeVPValueFor(Instr); 8643 Plan->addVPValue(Instr, PHIRecipe); 8644 } 8645 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8646 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8647 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8648 8649 // Note: first set Entry as region entry and then connect successors starting 8650 // from it in order, to propagate the "parent" of each VPBasicBlock. 8651 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8652 VPBlockUtils::connectBlocks(Pred, Exit); 8653 8654 return Region; 8655 } 8656 8657 VPRecipeOrVPValueTy 8658 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8659 ArrayRef<VPValue *> Operands, 8660 VFRange &Range, VPlanPtr &Plan) { 8661 // First, check for specific widening recipes that deal with calls, memory 8662 // operations, inductions and Phi nodes. 
8663 if (auto *CI = dyn_cast<CallInst>(Instr)) 8664 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8665 8666 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8667 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8668 8669 VPRecipeBase *Recipe; 8670 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8671 if (Phi->getParent() != OrigLoop->getHeader()) 8672 return tryToBlend(Phi, Operands, Plan); 8673 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8674 return toVPRecipeResult(Recipe); 8675 8676 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8677 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8678 VPValue *StartV = Operands[0]; 8679 if (Legal->isReductionVariable(Phi)) { 8680 const RecurrenceDescriptor &RdxDesc = 8681 Legal->getReductionVars().find(Phi)->second; 8682 assert(RdxDesc.getRecurrenceStartValue() == 8683 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8684 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8685 CM.isInLoopReduction(Phi), 8686 CM.useOrderedReductions(RdxDesc)); 8687 } else { 8688 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8689 } 8690 8691 // Record the incoming value from the backedge, so we can add the incoming 8692 // value from the backedge after all recipes have been created. 8693 recordRecipeOf(cast<Instruction>( 8694 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8695 PhisToFix.push_back(PhiRecipe); 8696 } else { 8697 // TODO: record backedge value for remaining pointer induction phis. 8698 assert(Phi->getType()->isPointerTy() && 8699 "only pointer phis should be handled here"); 8700 assert(Legal->getInductionVars().count(Phi) && 8701 "Not an induction variable"); 8702 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8703 VPValue *Start = Plan->getOrAddVPValue(II.getStartValue()); 8704 PhiRecipe = new VPWidenPHIRecipe(Phi, Start); 8705 } 8706 8707 return toVPRecipeResult(PhiRecipe); 8708 } 8709 8710 if (isa<TruncInst>(Instr) && 8711 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8712 Range, *Plan))) 8713 return toVPRecipeResult(Recipe); 8714 8715 if (!shouldWiden(Instr, Range)) 8716 return nullptr; 8717 8718 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8719 return toVPRecipeResult(new VPWidenGEPRecipe( 8720 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8721 8722 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8723 bool InvariantCond = 8724 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8725 return toVPRecipeResult(new VPWidenSelectRecipe( 8726 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8727 } 8728 8729 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8730 } 8731 8732 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8733 ElementCount MaxVF) { 8734 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8735 8736 // Collect instructions from the original loop that will become trivially dead 8737 // in the vectorized loop. We don't need to vectorize these instructions. For 8738 // example, original induction update instructions can become dead because we 8739 // separately emit induction "steps" when generating code for the new loop. 8740 // Similarly, we create a new latch condition when setting up the structure 8741 // of the new loop, so the old one can become dead. 
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
  for (auto &P : Legal->getSinkAfter()) {
    Instruction *SinkTarget = P.second;
    Instruction *FirstInst = &*SinkTarget->getParent()->begin();
    (void)FirstInst;
    while (DeadInstructions.contains(SinkTarget)) {
      assert(
          SinkTarget != FirstInst &&
          "Must find a live instruction (at least the one feeding the "
          "first-order recurrence PHI) before reaching beginning of the block");
      SinkTarget = SinkTarget->getPrevNode();
      assert(SinkTarget != P.first &&
             "sink source equals target, no sinking required");
    }
    P.second = SinkTarget;
  }

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

// Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
// CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
// BranchOnCount VPInstruction to the latch.
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
                                  bool HasNUW, bool IsVPlanNative) {
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  auto *StartV = Plan.getOrAddVPValue(StartIdx);

  auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
  if (IsVPlanNative)
    Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
  Header->insert(CanonicalIVPHI, Header->begin());

  auto *CanonicalIVIncrement =
      new VPInstruction(HasNUW ?
VPInstruction::CanonicalIVIncrementNUW 8801 : VPInstruction::CanonicalIVIncrement, 8802 {CanonicalIVPHI}, DL); 8803 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8804 8805 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8806 if (IsVPlanNative) { 8807 EB = cast<VPBasicBlock>(EB->getSinglePredecessor()); 8808 EB->setCondBit(nullptr); 8809 } 8810 EB->appendRecipe(CanonicalIVIncrement); 8811 8812 auto *BranchOnCount = 8813 new VPInstruction(VPInstruction::BranchOnCount, 8814 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8815 EB->appendRecipe(BranchOnCount); 8816 } 8817 8818 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8819 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8820 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8821 8822 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8823 8824 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8825 8826 // --------------------------------------------------------------------------- 8827 // Pre-construction: record ingredients whose recipes we'll need to further 8828 // process after constructing the initial VPlan. 8829 // --------------------------------------------------------------------------- 8830 8831 // Mark instructions we'll need to sink later and their targets as 8832 // ingredients whose recipe we'll need to record. 8833 for (auto &Entry : SinkAfter) { 8834 RecipeBuilder.recordRecipeOf(Entry.first); 8835 RecipeBuilder.recordRecipeOf(Entry.second); 8836 } 8837 for (auto &Reduction : CM.getInLoopReductionChains()) { 8838 PHINode *Phi = Reduction.first; 8839 RecurKind Kind = 8840 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8841 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8842 8843 RecipeBuilder.recordRecipeOf(Phi); 8844 for (auto &R : ReductionOperations) { 8845 RecipeBuilder.recordRecipeOf(R); 8846 // For min/max reductions, where we have a pair of icmp/select, we also 8847 // need to record the ICmp recipe, so it can be removed later. 8848 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8849 "Only min/max recurrences allowed for inloop reductions"); 8850 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8851 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8852 } 8853 } 8854 8855 // For each interleave group which is relevant for this (possibly trimmed) 8856 // Range, add it to the set of groups to be later applied to the VPlan and add 8857 // placeholders for its members' Recipes which we'll be replacing with a 8858 // single VPInterleaveRecipe. 8859 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8860 auto applyIG = [IG, this](ElementCount VF) -> bool { 8861 return (VF.isVector() && // Query is illegal for VF == 1 8862 CM.getWideningDecision(IG->getInsertPos(), VF) == 8863 LoopVectorizationCostModel::CM_Interleave); 8864 }; 8865 if (!getDecisionAndClampRange(applyIG, Range)) 8866 continue; 8867 InterleaveGroups.insert(IG); 8868 for (unsigned i = 0; i < IG->getFactor(); i++) 8869 if (Instruction *Member = IG->getMember(i)) 8870 RecipeBuilder.recordRecipeOf(Member); 8871 }; 8872 8873 // --------------------------------------------------------------------------- 8874 // Build initial VPlan: Scan the body of the loop in a topological order to 8875 // visit each basic block after having visited its predecessor basic blocks. 
  // ---------------------------------------------------------------------------

  // Create initial VPlan skeleton, with separate header and latch blocks.
  VPBasicBlock *HeaderVPBB = new VPBasicBlock();
  VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
  auto Plan = std::make_unique<VPlan>(TopRegion);

  Instruction *DLInst =
      getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
                        DLInst ? DLInst->getDebugLoc() : DebugLoc(),
                        !CM.foldTailByMasking(), false);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  VPBasicBlock *VPBB = HeaderVPBB;
  SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    VPBB->setName(BB->getName());
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      SmallVector<VPValue *, 4> Operands;
      auto *Phi = dyn_cast<PHINode>(Instr);
      if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
        Operands.push_back(Plan->getOrAddVPValue(
            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
      } else {
        auto OpRange = Plan->mapToVPValues(Instr->operands());
        Operands = {OpRange.begin(), OpRange.end()};
      }
      if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
              Instr, Operands, Range, Plan)) {
        // If Instr can be simplified to an existing VPValue, use it.
        if (RecipeOrValue.is<VPValue *>()) {
          auto *VPV = RecipeOrValue.get<VPValue *>();
          Plan->addVPValue(Instr, VPV);
          // If the re-used value is a recipe, register the recipe for the
          // instruction, in case the recipe for Instr needs to be recorded.
          if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
            RecipeBuilder.setRecipe(Instr, R);
          continue;
        }
        // Otherwise, add the new recipe.
        VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
            HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
          // Keep track of VPWidenIntOrFpInductionRecipes not in the phi
          // section of the header block. That can happen for truncates of
          // induction variables. Those recipes are moved to the phi section
          // of the header block after applying SinkAfter, which relies on the
          // original position of the trunc.
8950 assert(isa<TruncInst>(Instr)); 8951 InductionsToMove.push_back( 8952 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8953 } 8954 RecipeBuilder.setRecipe(Instr, Recipe); 8955 VPBB->appendRecipe(Recipe); 8956 continue; 8957 } 8958 8959 // Otherwise, if all widening options failed, Instruction is to be 8960 // replicated. This may create a successor for VPBB. 8961 VPBasicBlock *NextVPBB = 8962 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8963 if (NextVPBB != VPBB) { 8964 VPBB = NextVPBB; 8965 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8966 : ""); 8967 } 8968 } 8969 8970 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8971 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8972 } 8973 8974 // Fold the last, empty block into its predecessor. 8975 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8976 assert(VPBB && "expected to fold last (empty) block"); 8977 // After here, VPBB should not be used. 8978 VPBB = nullptr; 8979 8980 assert(isa<VPRegionBlock>(Plan->getEntry()) && 8981 !Plan->getEntry()->getEntryBasicBlock()->empty() && 8982 "entry block must be set to a VPRegionBlock having a non-empty entry " 8983 "VPBasicBlock"); 8984 RecipeBuilder.fixHeaderPhis(); 8985 8986 // --------------------------------------------------------------------------- 8987 // Transform initial VPlan: Apply previously taken decisions, in order, to 8988 // bring the VPlan to its final state. 8989 // --------------------------------------------------------------------------- 8990 8991 // Apply Sink-After legal constraints. 8992 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8993 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8994 if (Region && Region->isReplicator()) { 8995 assert(Region->getNumSuccessors() == 1 && 8996 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8997 assert(R->getParent()->size() == 1 && 8998 "A recipe in an original replicator region must be the only " 8999 "recipe in its block"); 9000 return Region; 9001 } 9002 return nullptr; 9003 }; 9004 for (auto &Entry : SinkAfter) { 9005 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9006 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9007 9008 auto *TargetRegion = GetReplicateRegion(Target); 9009 auto *SinkRegion = GetReplicateRegion(Sink); 9010 if (!SinkRegion) { 9011 // If the sink source is not a replicate region, sink the recipe directly. 9012 if (TargetRegion) { 9013 // The target is in a replication region, make sure to move Sink to 9014 // the block after it, not into the replication region itself. 9015 VPBasicBlock *NextBlock = 9016 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9017 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9018 } else 9019 Sink->moveAfter(Target); 9020 continue; 9021 } 9022 9023 // The sink source is in a replicate region. Unhook the region from the CFG. 9024 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9025 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9026 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9027 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9028 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9029 9030 if (TargetRegion) { 9031 // The target recipe is also in a replicate region, move the sink region 9032 // after the target region. 
9033 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9034 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9035 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9036 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9037 } else { 9038 // The sink source is in a replicate region, we need to move the whole 9039 // replicate region, which should only contain a single recipe in the 9040 // main block. 9041 auto *SplitBlock = 9042 Target->getParent()->splitAt(std::next(Target->getIterator())); 9043 9044 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9045 9046 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9047 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9048 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9049 } 9050 } 9051 9052 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 9053 VPlanTransforms::removeRedundantInductionCasts(*Plan); 9054 9055 // Now that sink-after is done, move induction recipes for optimized truncates 9056 // to the phi section of the header block. 9057 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9058 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9059 9060 // Adjust the recipes for any inloop reductions. 9061 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 9062 RecipeBuilder, Range.Start); 9063 9064 // Introduce a recipe to combine the incoming and previous values of a 9065 // first-order recurrence. 9066 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9067 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9068 if (!RecurPhi) 9069 continue; 9070 9071 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9072 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9073 auto *Region = GetReplicateRegion(PrevRecipe); 9074 if (Region) 9075 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9076 if (Region || PrevRecipe->isPhi()) 9077 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9078 else 9079 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9080 9081 auto *RecurSplice = cast<VPInstruction>( 9082 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9083 {RecurPhi, RecurPhi->getBackedgeValue()})); 9084 9085 RecurPhi->replaceAllUsesWith(RecurSplice); 9086 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9087 // all users. 9088 RecurSplice->setOperand(0, RecurPhi); 9089 } 9090 9091 // Interleave memory: for each Interleave Group we marked earlier as relevant 9092 // for this VPlan, replace the Recipes widening its memory instructions with a 9093 // single VPInterleaveRecipe at its insertion point. 
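  // For example, a factor-2 group covering loads of A[2*i] and A[2*i+1] is
  // replaced by a single VPInterleaveRecipe, which typically emits one wide
  // load followed by shuffles that extract each member's lanes.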
9094 for (auto IG : InterleaveGroups) { 9095 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9096 RecipeBuilder.getRecipe(IG->getInsertPos())); 9097 SmallVector<VPValue *, 4> StoredValues; 9098 for (unsigned i = 0; i < IG->getFactor(); ++i) 9099 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9100 auto *StoreR = 9101 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9102 StoredValues.push_back(StoreR->getStoredValue()); 9103 } 9104 9105 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9106 Recipe->getMask()); 9107 VPIG->insertBefore(Recipe); 9108 unsigned J = 0; 9109 for (unsigned i = 0; i < IG->getFactor(); ++i) 9110 if (Instruction *Member = IG->getMember(i)) { 9111 if (!Member->getType()->isVoidTy()) { 9112 VPValue *OriginalV = Plan->getVPValue(Member); 9113 Plan->removeVPValueFor(Member); 9114 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9115 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9116 J++; 9117 } 9118 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9119 } 9120 } 9121 9122 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9123 // in ways that accessing values using original IR values is incorrect. 9124 Plan->disableValue2VPValue(); 9125 9126 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9127 VPlanTransforms::sinkScalarOperands(*Plan); 9128 VPlanTransforms::mergeReplicateRegions(*Plan); 9129 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9130 9131 std::string PlanName; 9132 raw_string_ostream RSO(PlanName); 9133 ElementCount VF = Range.Start; 9134 Plan->addVF(VF); 9135 RSO << "Initial VPlan for VF={" << VF; 9136 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9137 Plan->addVF(VF); 9138 RSO << "," << VF; 9139 } 9140 RSO << "},UF>=1"; 9141 RSO.flush(); 9142 Plan->setName(PlanName); 9143 9144 // Fold Exit block into its predecessor if possible. 9145 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9146 // VPBasicBlock as exit. 9147 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit()); 9148 9149 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9150 return Plan; 9151 } 9152 9153 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9154 // Outer loop handling: They may require CFG and instruction level 9155 // transformations before even evaluating whether vectorization is profitable. 9156 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9157 // the vectorization pipeline. 9158 assert(!OrigLoop->isInnermost()); 9159 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9160 9161 // Create new empty VPlan 9162 auto Plan = std::make_unique<VPlan>(); 9163 9164 // Build hierarchical CFG 9165 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9166 HCFGBuilder.buildHierarchicalCFG(); 9167 9168 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9169 VF *= 2) 9170 Plan->addVF(VF); 9171 9172 if (EnableVPlanPredication) { 9173 VPlanPredicator VPP(*Plan); 9174 VPP.predicate(); 9175 9176 // Avoid running transformation to recipes until masked code generation in 9177 // VPlan-native path is in place. 
9178 return Plan; 9179 } 9180 9181 SmallPtrSet<Instruction *, 1> DeadInstructions; 9182 VPlanTransforms::VPInstructionsToVPRecipes( 9183 OrigLoop, Plan, 9184 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, 9185 DeadInstructions, *PSE.getSE()); 9186 9187 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(), 9188 true, true); 9189 return Plan; 9190 } 9191 9192 // Adjust the recipes for reductions. For in-loop reductions the chain of 9193 // instructions leading from the loop exit instr to the phi need to be converted 9194 // to reductions, with one operand being vector and the other being the scalar 9195 // reduction chain. For other reductions, a select is introduced between the phi 9196 // and live-out recipes when folding the tail. 9197 void LoopVectorizationPlanner::adjustRecipesForReductions( 9198 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, 9199 ElementCount MinVF) { 9200 for (auto &Reduction : CM.getInLoopReductionChains()) { 9201 PHINode *Phi = Reduction.first; 9202 const RecurrenceDescriptor &RdxDesc = 9203 Legal->getReductionVars().find(Phi)->second; 9204 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9205 9206 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) 9207 continue; 9208 9209 // ReductionOperations are orders top-down from the phi's use to the 9210 // LoopExitValue. We keep a track of the previous item (the Chain) to tell 9211 // which of the two operands will remain scalar and which will be reduced. 9212 // For minmax the chain will be the select instructions. 9213 Instruction *Chain = Phi; 9214 for (Instruction *R : ReductionOperations) { 9215 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9216 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9217 9218 VPValue *ChainOp = Plan->getVPValue(Chain); 9219 unsigned FirstOpId; 9220 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9221 "Only min/max recurrences allowed for inloop reductions"); 9222 // Recognize a call to the llvm.fmuladd intrinsic. 9223 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9224 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9225 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9226 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9227 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9228 "Expected to replace a VPWidenSelectSC"); 9229 FirstOpId = 1; 9230 } else { 9231 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9232 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9233 "Expected to replace a VPWidenSC"); 9234 FirstOpId = 0; 9235 } 9236 unsigned VecOpId = 9237 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9238 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9239 9240 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent()) 9241 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9242 : nullptr; 9243 9244 if (IsFMulAdd) { 9245 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9246 // need to create an fmul recipe to use as the vector operand for the 9247 // fadd reduction. 
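        // E.g. %s = call float @llvm.fmuladd(float %a, float %b, float %sum)
        // feeds %a * %b into the reduction, so an explicit fmul of the call's
        // first two operands becomes the vector operand of the reduction.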
9248 VPInstruction *FMulRecipe = new VPInstruction( 9249 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9250 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9251 WidenRecipe->getParent()->insert(FMulRecipe, 9252 WidenRecipe->getIterator()); 9253 VecOp = FMulRecipe; 9254 } 9255 VPReductionRecipe *RedRecipe = 9256 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9257 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9258 Plan->removeVPValueFor(R); 9259 Plan->addVPValue(R, RedRecipe); 9260 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9261 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9262 WidenRecipe->eraseFromParent(); 9263 9264 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9265 VPRecipeBase *CompareRecipe = 9266 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9267 assert(isa<VPWidenRecipe>(CompareRecipe) && 9268 "Expected to replace a VPWidenSC"); 9269 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9270 "Expected no remaining users"); 9271 CompareRecipe->eraseFromParent(); 9272 } 9273 Chain = R; 9274 } 9275 } 9276 9277 // If tail is folded by masking, introduce selects between the phi 9278 // and the live-out instruction of each reduction, at the beginning of the 9279 // dedicated latch block. 9280 if (CM.foldTailByMasking()) { 9281 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9282 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9283 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9284 if (!PhiR || PhiR->isInLoop()) 9285 continue; 9286 VPValue *Cond = 9287 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9288 VPValue *Red = PhiR->getBackedgeValue(); 9289 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9290 "reduction recipe must be defined before latch"); 9291 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9292 } 9293 } 9294 } 9295 9296 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9297 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9298 VPSlotTracker &SlotTracker) const { 9299 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9300 IG->getInsertPos()->printAsOperand(O, false); 9301 O << ", "; 9302 getAddr()->printAsOperand(O, SlotTracker); 9303 VPValue *Mask = getMask(); 9304 if (Mask) { 9305 O << ", "; 9306 Mask->printAsOperand(O, SlotTracker); 9307 } 9308 9309 unsigned OpIdx = 0; 9310 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9311 if (!IG->getMember(i)) 9312 continue; 9313 if (getNumStoreOperands() > 0) { 9314 O << "\n" << Indent << " store "; 9315 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9316 O << " to index " << i; 9317 } else { 9318 O << "\n" << Indent << " "; 9319 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9320 O << " = load from index " << i; 9321 } 9322 ++OpIdx; 9323 } 9324 } 9325 #endif 9326 9327 void VPWidenCallRecipe::execute(VPTransformState &State) { 9328 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9329 *this, State); 9330 } 9331 9332 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9333 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9334 State.ILV->setDebugLocFromInst(&I); 9335 9336 // The condition can be loop invariant but still defined inside the 9337 // loop. This means that we can't just use the original 'cond' value. 9338 // We have to take the 'vectorized' value and pick the first lane. 
9339 // Instcombine will make this a no-op. 9340 auto *InvarCond = 9341 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9342 9343 for (unsigned Part = 0; Part < State.UF; ++Part) { 9344 Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part); 9345 Value *Op0 = State.get(getOperand(1), Part); 9346 Value *Op1 = State.get(getOperand(2), Part); 9347 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9348 State.set(this, Sel, Part); 9349 State.ILV->addMetadata(Sel, &I); 9350 } 9351 } 9352 9353 void VPWidenRecipe::execute(VPTransformState &State) { 9354 auto &I = *cast<Instruction>(getUnderlyingValue()); 9355 auto &Builder = State.Builder; 9356 switch (I.getOpcode()) { 9357 case Instruction::Call: 9358 case Instruction::Br: 9359 case Instruction::PHI: 9360 case Instruction::GetElementPtr: 9361 case Instruction::Select: 9362 llvm_unreachable("This instruction is handled by a different recipe."); 9363 case Instruction::UDiv: 9364 case Instruction::SDiv: 9365 case Instruction::SRem: 9366 case Instruction::URem: 9367 case Instruction::Add: 9368 case Instruction::FAdd: 9369 case Instruction::Sub: 9370 case Instruction::FSub: 9371 case Instruction::FNeg: 9372 case Instruction::Mul: 9373 case Instruction::FMul: 9374 case Instruction::FDiv: 9375 case Instruction::FRem: 9376 case Instruction::Shl: 9377 case Instruction::LShr: 9378 case Instruction::AShr: 9379 case Instruction::And: 9380 case Instruction::Or: 9381 case Instruction::Xor: { 9382 // Just widen unops and binops. 9383 State.ILV->setDebugLocFromInst(&I); 9384 9385 for (unsigned Part = 0; Part < State.UF; ++Part) { 9386 SmallVector<Value *, 2> Ops; 9387 for (VPValue *VPOp : operands()) 9388 Ops.push_back(State.get(VPOp, Part)); 9389 9390 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9391 9392 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9393 VecOp->copyIRFlags(&I); 9394 9395 // If the instruction is vectorized and was in a basic block that needed 9396 // predication, we can't propagate poison-generating flags (nuw/nsw, 9397 // exact, etc.). The control flow has been linearized and the 9398 // instruction is no longer guarded by the predicate, which could make 9399 // the flag properties to no longer hold. 9400 if (State.MayGeneratePoisonRecipes.contains(this)) 9401 VecOp->dropPoisonGeneratingFlags(); 9402 } 9403 9404 // Use this vector value for all users of the original instruction. 9405 State.set(this, V, Part); 9406 State.ILV->addMetadata(V, &I); 9407 } 9408 9409 break; 9410 } 9411 case Instruction::ICmp: 9412 case Instruction::FCmp: { 9413 // Widen compares. Generate vector compares. 9414 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9415 auto *Cmp = cast<CmpInst>(&I); 9416 State.ILV->setDebugLocFromInst(Cmp); 9417 for (unsigned Part = 0; Part < State.UF; ++Part) { 9418 Value *A = State.get(getOperand(0), Part); 9419 Value *B = State.get(getOperand(1), Part); 9420 Value *C = nullptr; 9421 if (FCmp) { 9422 // Propagate fast math flags. 
9423 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9424 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9425 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9426 } else { 9427 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9428 } 9429 State.set(this, C, Part); 9430 State.ILV->addMetadata(C, &I); 9431 } 9432 9433 break; 9434 } 9435 9436 case Instruction::ZExt: 9437 case Instruction::SExt: 9438 case Instruction::FPToUI: 9439 case Instruction::FPToSI: 9440 case Instruction::FPExt: 9441 case Instruction::PtrToInt: 9442 case Instruction::IntToPtr: 9443 case Instruction::SIToFP: 9444 case Instruction::UIToFP: 9445 case Instruction::Trunc: 9446 case Instruction::FPTrunc: 9447 case Instruction::BitCast: { 9448 auto *CI = cast<CastInst>(&I); 9449 State.ILV->setDebugLocFromInst(CI); 9450 9451 /// Vectorize casts. 9452 Type *DestTy = (State.VF.isScalar()) 9453 ? CI->getType() 9454 : VectorType::get(CI->getType(), State.VF); 9455 9456 for (unsigned Part = 0; Part < State.UF; ++Part) { 9457 Value *A = State.get(getOperand(0), Part); 9458 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9459 State.set(this, Cast, Part); 9460 State.ILV->addMetadata(Cast, &I); 9461 } 9462 break; 9463 } 9464 default: 9465 // This instruction is not vectorized by simple widening. 9466 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9467 llvm_unreachable("Unhandled instruction!"); 9468 } // end of switch. 9469 } 9470 9471 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9472 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9473 // Construct a vector GEP by widening the operands of the scalar GEP as 9474 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9475 // results in a vector of pointers when at least one operand of the GEP 9476 // is vector-typed. Thus, to keep the representation compact, we only use 9477 // vector-typed operands for loop-varying values. 9478 9479 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9480 // If we are vectorizing, but the GEP has only loop-invariant operands, 9481 // the GEP we build (by only using vector-typed operands for 9482 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9483 // produce a vector of pointers, we need to either arbitrarily pick an 9484 // operand to broadcast, or broadcast a clone of the original GEP. 9485 // Here, we broadcast a clone of the original. 9486 // 9487 // TODO: If at some point we decide to scalarize instructions having 9488 // loop-invariant operands, this special case will no longer be 9489 // required. We would add the scalarization decision to 9490 // collectLoopScalars() and teach getVectorValue() to broadcast 9491 // the lane-zero scalar value. 9492 auto *Clone = State.Builder.Insert(GEP->clone()); 9493 for (unsigned Part = 0; Part < State.UF; ++Part) { 9494 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9495 State.set(this, EntryPart, Part); 9496 State.ILV->addMetadata(EntryPart, GEP); 9497 } 9498 } else { 9499 // If the GEP has at least one loop-varying operand, we are sure to 9500 // produce a vector of pointers. But if we are only unrolling, we want 9501 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9502 // produce with the code below will be scalar (if VF == 1) or vector 9503 // (otherwise). Note that for the unroll-only case, we still maintain 9504 // values in the vector mapping with initVector, as we do for other 9505 // instructions. 
9506 for (unsigned Part = 0; Part < State.UF; ++Part) { 9507 // The pointer operand of the new GEP. If it's loop-invariant, we 9508 // won't broadcast it. 9509 auto *Ptr = IsPtrLoopInvariant 9510 ? State.get(getOperand(0), VPIteration(0, 0)) 9511 : State.get(getOperand(0), Part); 9512 9513 // Collect all the indices for the new GEP. If any index is 9514 // loop-invariant, we won't broadcast it. 9515 SmallVector<Value *, 4> Indices; 9516 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9517 VPValue *Operand = getOperand(I); 9518 if (IsIndexLoopInvariant[I - 1]) 9519 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9520 else 9521 Indices.push_back(State.get(Operand, Part)); 9522 } 9523 9524 // If the GEP instruction is vectorized and was in a basic block that 9525 // needed predication, we can't propagate the poison-generating 'inbounds' 9526 // flag. The control flow has been linearized and the GEP is no longer 9527 // guarded by the predicate, which could make the 'inbounds' properties to 9528 // no longer hold. 9529 bool IsInBounds = 9530 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9531 9532 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9533 // but it should be a vector, otherwise. 9534 auto *NewGEP = IsInBounds 9535 ? State.Builder.CreateInBoundsGEP( 9536 GEP->getSourceElementType(), Ptr, Indices) 9537 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9538 Ptr, Indices); 9539 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9540 "NewGEP is not a pointer vector"); 9541 State.set(this, NewGEP, Part); 9542 State.ILV->addMetadata(NewGEP, GEP); 9543 } 9544 } 9545 } 9546 9547 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9548 assert(!State.Instance && "Int or FP induction being replicated."); 9549 9550 Value *Start = getStartValue()->getLiveInIRValue(); 9551 const InductionDescriptor &ID = getInductionDescriptor(); 9552 TruncInst *Trunc = getTruncInst(); 9553 IRBuilderBase &Builder = State.Builder; 9554 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9555 assert(State.VF.isVector() && "must have vector VF"); 9556 9557 // The value from the original loop to which we are mapping the new induction 9558 // variable. 9559 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9560 9561 auto &DL = EntryVal->getModule()->getDataLayout(); 9562 9563 // Generate code for the induction step. Note that induction steps are 9564 // required to be loop-invariant 9565 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 9566 if (SE.isSCEVable(IV->getType())) { 9567 SCEVExpander Exp(SE, DL, "induction"); 9568 return Exp.expandCodeFor(Step, Step->getType(), 9569 State.CFG.VectorPreHeader->getTerminator()); 9570 } 9571 return cast<SCEVUnknown>(Step)->getValue(); 9572 }; 9573 9574 // Fast-math-flags propagate from the original induction instruction. 9575 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9576 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9577 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9578 9579 // Now do the actual transformations, and start with creating the step value. 
9580 Value *Step = CreateStepValue(ID.getStep()); 9581 9582 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9583 "Expected either an induction phi-node or a truncate of it!"); 9584 9585 // Construct the initial value of the vector IV in the vector loop preheader 9586 auto CurrIP = Builder.saveIP(); 9587 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 9588 if (isa<TruncInst>(EntryVal)) { 9589 assert(Start->getType()->isIntegerTy() && 9590 "Truncation requires an integer type"); 9591 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9592 Step = Builder.CreateTrunc(Step, TruncType); 9593 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9594 } 9595 9596 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9597 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9598 Value *SteppedStart = getStepVector( 9599 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9600 9601 // We create vector phi nodes for both integer and floating-point induction 9602 // variables. Here, we determine the kind of arithmetic we will perform. 9603 Instruction::BinaryOps AddOp; 9604 Instruction::BinaryOps MulOp; 9605 if (Step->getType()->isIntegerTy()) { 9606 AddOp = Instruction::Add; 9607 MulOp = Instruction::Mul; 9608 } else { 9609 AddOp = ID.getInductionOpcode(); 9610 MulOp = Instruction::FMul; 9611 } 9612 9613 // Multiply the vectorization factor by the step using integer or 9614 // floating-point arithmetic as appropriate. 9615 Type *StepType = Step->getType(); 9616 Value *RuntimeVF; 9617 if (Step->getType()->isFloatingPointTy()) 9618 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9619 else 9620 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9621 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9622 9623 // Create a vector splat to use in the induction update. 9624 // 9625 // FIXME: If the step is non-constant, we create the vector splat with 9626 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9627 // handle a constant vector splat. 9628 Value *SplatVF = isa<Constant>(Mul) 9629 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9630 : Builder.CreateVectorSplat(State.VF, Mul); 9631 Builder.restoreIP(CurrIP); 9632 9633 // We may need to add the step a number of times, depending on the unroll 9634 // factor. The last of those goes into the PHI. 9635 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9636 &*State.CFG.PrevBB->getFirstInsertionPt()); 9637 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9638 Instruction *LastInduction = VecInd; 9639 for (unsigned Part = 0; Part < State.UF; ++Part) { 9640 State.set(this, LastInduction, Part); 9641 9642 if (isa<TruncInst>(EntryVal)) 9643 State.ILV->addMetadata(LastInduction, EntryVal); 9644 9645 LastInduction = cast<Instruction>( 9646 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9647 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9648 } 9649 9650 // Move the last step to the end of the latch block. This ensures consistent 9651 // placement of all induction updates. 
9652 auto *LoopVectorLatch = 9653 State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 9654 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 9655 LastInduction->moveBefore(Br); 9656 LastInduction->setName("vec.ind.next"); 9657 9658 VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader); 9659 VecInd->addIncoming(LastInduction, LoopVectorLatch); 9660 } 9661 9662 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9663 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9664 9665 // Fast-math-flags propagate from the original induction instruction. 9666 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9667 if (IndDesc.getInductionBinOp() && 9668 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9669 State.Builder.setFastMathFlags( 9670 IndDesc.getInductionBinOp()->getFastMathFlags()); 9671 9672 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9673 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9674 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9675 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9676 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9677 ScalarIV = 9678 Ty->isIntegerTy() 9679 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9680 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9681 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9682 getStartValue()->getLiveInIRValue(), Step, 9683 IndDesc); 9684 ScalarIV->setName("offset.idx"); 9685 } 9686 if (TruncToTy) { 9687 assert(Step->getType()->isIntegerTy() && 9688 "Truncation requires an integer step"); 9689 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9690 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9691 } 9692 return ScalarIV; 9693 }; 9694 9695 Value *ScalarIV = CreateScalarIV(Step); 9696 if (State.VF.isVector()) { 9697 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9698 return; 9699 } 9700 9701 for (unsigned Part = 0; Part < State.UF; ++Part) { 9702 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9703 Value *EntryPart; 9704 if (Step->getType()->isFloatingPointTy()) { 9705 Value *StartIdx = 9706 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9707 // Floating-point operations inherit FMF via the builder's flags. 9708 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9709 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9710 ScalarIV, MulOp); 9711 } else { 9712 Value *StartIdx = 9713 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9714 EntryPart = State.Builder.CreateAdd( 9715 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9716 } 9717 State.set(this, EntryPart, Part); 9718 } 9719 } 9720 9721 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9722 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9723 State); 9724 } 9725 9726 void VPBlendRecipe::execute(VPTransformState &State) { 9727 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9728 // We know that all PHIs in non-header blocks are converted into 9729 // selects, so we don't have to worry about the insertion order and we 9730 // can just use the builder. 9731 // At this point we generate the predication tree. There may be 9732 // duplications since this is a simple recursive scan, but future 9733 // optimizations will clean it up. 
9734 9735 unsigned NumIncoming = getNumIncomingValues(); 9736 9737 // Generate a sequence of selects of the form: 9738 // SELECT(Mask3, In3, 9739 // SELECT(Mask2, In2, 9740 // SELECT(Mask1, In1, 9741 // In0))) 9742 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9743 // are essentially undef are taken from In0. 9744 InnerLoopVectorizer::VectorParts Entry(State.UF); 9745 for (unsigned In = 0; In < NumIncoming; ++In) { 9746 for (unsigned Part = 0; Part < State.UF; ++Part) { 9747 // We might have single edge PHIs (blocks) - use an identity 9748 // 'select' for the first PHI operand. 9749 Value *In0 = State.get(getIncomingValue(In), Part); 9750 if (In == 0) 9751 Entry[Part] = In0; // Initialize with the first incoming value. 9752 else { 9753 // Select between the current value and the previous incoming edge 9754 // based on the incoming mask. 9755 Value *Cond = State.get(getMask(In), Part); 9756 Entry[Part] = 9757 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9758 } 9759 } 9760 } 9761 for (unsigned Part = 0; Part < State.UF; ++Part) 9762 State.set(this, Entry[Part], Part); 9763 } 9764 9765 void VPInterleaveRecipe::execute(VPTransformState &State) { 9766 assert(!State.Instance && "Interleave group being replicated."); 9767 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9768 getStoredValues(), getMask()); 9769 } 9770 9771 void VPReductionRecipe::execute(VPTransformState &State) { 9772 assert(!State.Instance && "Reduction being replicated."); 9773 Value *PrevInChain = State.get(getChainOp(), 0); 9774 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9775 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9776 // Propagate the fast-math flags carried by the underlying instruction. 9777 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9778 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9779 for (unsigned Part = 0; Part < State.UF; ++Part) { 9780 Value *NewVecOp = State.get(getVecOp(), Part); 9781 if (VPValue *Cond = getCondOp()) { 9782 Value *NewCond = State.get(Cond, Part); 9783 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9784 Value *Iden = RdxDesc->getRecurrenceIdentity( 9785 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9786 Value *IdenVec = 9787 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9788 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9789 NewVecOp = Select; 9790 } 9791 Value *NewRed; 9792 Value *NextInChain; 9793 if (IsOrdered) { 9794 if (State.VF.isVector()) 9795 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9796 PrevInChain); 9797 else 9798 NewRed = State.Builder.CreateBinOp( 9799 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9800 NewVecOp); 9801 PrevInChain = NewRed; 9802 } else { 9803 PrevInChain = State.get(getChainOp(), Part); 9804 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9805 } 9806 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9807 NextInChain = 9808 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9809 NewRed, PrevInChain); 9810 } else if (IsOrdered) 9811 NextInChain = NewRed; 9812 else 9813 NextInChain = State.Builder.CreateBinOp( 9814 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9815 PrevInChain); 9816 State.set(this, NextInChain, Part); 9817 } 9818 } 9819 9820 void VPReplicateRecipe::execute(VPTransformState &State) { 9821 if (State.Instance) { // Generate a single instance. 
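    // State.Instance holds a single (Part, Lane) pair; it is only set when this
    // recipe is executed from within a replicated (predicated) region, in which
    // case exactly one scalar instance is generated here.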
9822 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9823 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9824 IsPredicated, State); 9825 // Insert the scalar instance, packing it into a vector. 9826 if (AlsoPack && State.VF.isVector()) { 9827 // If we're constructing lane 0, initialize to start from poison. 9828 if (State.Instance->Lane.isFirstLane()) { 9829 assert(!State.VF.isScalable() && "VF is assumed to be non-scalable."); 9830 Value *Poison = PoisonValue::get( 9831 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9832 State.set(this, Poison, State.Instance->Part); 9833 } 9834 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9835 } 9836 return; 9837 } 9838 9839 // Generate scalar instances for all VF lanes of all UF parts, unless the 9840 // instruction is uniform, in which case generate only the first lane for each 9841 // of the UF parts. 9842 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); 9843 assert((!State.VF.isScalable() || IsUniform) && 9844 "Can't scalarize a scalable vector"); 9845 for (unsigned Part = 0; Part < State.UF; ++Part) 9846 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9847 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9848 VPIteration(Part, Lane), IsPredicated, 9849 State); 9850 } 9851 9852 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9853 assert(State.Instance && "Branch on Mask works only on single instance."); 9854 9855 unsigned Part = State.Instance->Part; 9856 unsigned Lane = State.Instance->Lane.getKnownLane(); 9857 9858 Value *ConditionBit = nullptr; 9859 VPValue *BlockInMask = getMask(); 9860 if (BlockInMask) { 9861 ConditionBit = State.get(BlockInMask, Part); 9862 if (ConditionBit->getType()->isVectorTy()) 9863 ConditionBit = State.Builder.CreateExtractElement( 9864 ConditionBit, State.Builder.getInt32(Lane)); 9865 } else // Block in mask is all-one. 9866 ConditionBit = State.Builder.getTrue(); 9867 9868 // Replace the temporary unreachable terminator with a new conditional branch, 9869 // whose two destinations will be set later when they are created. 9870 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9871 assert(isa<UnreachableInst>(CurrentTerminator) && 9872 "Expected to replace unreachable terminator with conditional branch."); 9873 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9874 CondBr->setSuccessor(0, nullptr); 9875 ReplaceInstWithInst(CurrentTerminator, CondBr); 9876 } 9877 9878 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9879 assert(State.Instance && "Predicated instruction PHI works per instance."); 9880 Instruction *ScalarPredInst = 9881 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9882 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9883 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9884 assert(PredicatingBB && "Predicated block has no single predecessor."); 9885 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9886 "operand must be VPReplicateRecipe"); 9887 9888 // By current pack/unpack logic we need to generate only a single phi node: if 9889 // a vector value for the predicated instruction exists at this point it means 9890 // the instruction has vector users only, and a phi for the vector value is 9891 // needed. In this case the recipe of the predicated instruction is marked to 9892 // also do that packing, thereby "hoisting" the insert-element sequence.
9893 // Otherwise, a phi node for the scalar value is needed. 9894 unsigned Part = State.Instance->Part; 9895 if (State.hasVectorValue(getOperand(0), Part)) { 9896 Value *VectorValue = State.get(getOperand(0), Part); 9897 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9898 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9899 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9900 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9901 if (State.hasVectorValue(this, Part)) 9902 State.reset(this, VPhi, Part); 9903 else 9904 State.set(this, VPhi, Part); 9905 // NOTE: Currently we need to update the value of the operand, so the next 9906 // predicated iteration inserts its generated value in the correct vector. 9907 State.reset(getOperand(0), VPhi, Part); 9908 } else { 9909 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9910 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9911 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9912 PredicatingBB); 9913 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9914 if (State.hasScalarValue(this, *State.Instance)) 9915 State.reset(this, Phi, *State.Instance); 9916 else 9917 State.set(this, Phi, *State.Instance); 9918 // NOTE: Currently we need to update the value of the operand, so the next 9919 // predicated iteration inserts its generated value in the correct vector. 9920 State.reset(getOperand(0), Phi, *State.Instance); 9921 } 9922 } 9923 9924 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9925 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9926 9927 // Attempt to issue a wide load. 9928 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9929 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9930 9931 assert((LI || SI) && "Invalid Load/Store instruction"); 9932 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9933 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9934 9935 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9936 9937 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9938 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9939 bool CreateGatherScatter = !Consecutive; 9940 9941 auto &Builder = State.Builder; 9942 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9943 bool isMaskRequired = getMask(); 9944 if (isMaskRequired) 9945 for (unsigned Part = 0; Part < State.UF; ++Part) 9946 BlockInMaskParts[Part] = State.get(getMask(), Part); 9947 9948 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9949 // Calculate the pointer for the specific unroll-part. 9950 GetElementPtrInst *PartPtr = nullptr; 9951 9952 bool InBounds = false; 9953 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9954 InBounds = gep->isInBounds(); 9955 if (Reverse) { 9956 // If the address is consecutive but reversed, then the 9957 // wide store needs to start at the last vector element. 
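      // For example, with a fixed VF of 4 and Part == 1, the code below computes
      // NumElt = -4 and LastLane = -3, so the part pointer ends up at Ptr[-7] and
      // the wide access covers Ptr[-7..-4]; the mask (and, later, the loaded or
      // stored data) is reversed to restore the original element order.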
9958 // RunTimeVF = VScale * VF.getKnownMinValue() 9959 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9960 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9961 // NumElt = -Part * RunTimeVF 9962 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9963 // LastLane = 1 - RunTimeVF 9964 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9965 PartPtr = 9966 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9967 PartPtr->setIsInBounds(InBounds); 9968 PartPtr = cast<GetElementPtrInst>( 9969 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9970 PartPtr->setIsInBounds(InBounds); 9971 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 9972 BlockInMaskParts[Part] = 9973 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9974 } else { 9975 Value *Increment = 9976 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9977 PartPtr = cast<GetElementPtrInst>( 9978 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9979 PartPtr->setIsInBounds(InBounds); 9980 } 9981 9982 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9983 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9984 }; 9985 9986 // Handle Stores: 9987 if (SI) { 9988 State.ILV->setDebugLocFromInst(SI); 9989 9990 for (unsigned Part = 0; Part < State.UF; ++Part) { 9991 Instruction *NewSI = nullptr; 9992 Value *StoredVal = State.get(StoredValue, Part); 9993 if (CreateGatherScatter) { 9994 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9995 Value *VectorGep = State.get(getAddr(), Part); 9996 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9997 MaskPart); 9998 } else { 9999 if (Reverse) { 10000 // If we store to reverse consecutive memory locations, then we need 10001 // to reverse the order of elements in the stored value. 10002 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 10003 // We don't want to update the value in the map as it might be used in 10004 // another expression. So don't call resetVectorValue(StoredVal). 10005 } 10006 auto *VecPtr = 10007 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10008 if (isMaskRequired) 10009 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10010 BlockInMaskParts[Part]); 10011 else 10012 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10013 } 10014 State.ILV->addMetadata(NewSI, SI); 10015 } 10016 return; 10017 } 10018 10019 // Handle loads. 10020 assert(LI && "Must have a load instruction"); 10021 State.ILV->setDebugLocFromInst(LI); 10022 for (unsigned Part = 0; Part < State.UF; ++Part) { 10023 Value *NewLI; 10024 if (CreateGatherScatter) { 10025 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10026 Value *VectorGep = State.get(getAddr(), Part); 10027 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10028 nullptr, "wide.masked.gather"); 10029 State.ILV->addMetadata(NewLI, LI); 10030 } else { 10031 auto *VecPtr = 10032 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10033 if (isMaskRequired) 10034 NewLI = Builder.CreateMaskedLoad( 10035 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10036 PoisonValue::get(DataTy), "wide.masked.load"); 10037 else 10038 NewLI = 10039 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10040 10041 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
10042 State.ILV->addMetadata(NewLI, LI); 10043 if (Reverse) 10044 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10045 } 10046 10047 State.set(this, NewLI, Part); 10048 } 10049 } 10050 10051 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10052 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10053 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10054 // for predication. 10055 static ScalarEpilogueLowering getScalarEpilogueLowering( 10056 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10057 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10058 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10059 LoopVectorizationLegality &LVL) { 10060 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10061 // don't look at hints or options, and don't request a scalar epilogue. 10062 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10063 // LoopAccessInfo (due to code dependency and not being able to reliably get 10064 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10065 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10066 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10067 // back to the old way and vectorize with versioning when forced. See D81345.) 10068 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10069 PGSOQueryType::IRPass) && 10070 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10071 return CM_ScalarEpilogueNotAllowedOptSize; 10072 10073 // 2) If set, obey the directives 10074 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10075 switch (PreferPredicateOverEpilogue) { 10076 case PreferPredicateTy::ScalarEpilogue: 10077 return CM_ScalarEpilogueAllowed; 10078 case PreferPredicateTy::PredicateElseScalarEpilogue: 10079 return CM_ScalarEpilogueNotNeededUsePredicate; 10080 case PreferPredicateTy::PredicateOrDontVectorize: 10081 return CM_ScalarEpilogueNotAllowedUsePredicate; 10082 }; 10083 } 10084 10085 // 3) If set, obey the hints 10086 switch (Hints.getPredicate()) { 10087 case LoopVectorizeHints::FK_Enabled: 10088 return CM_ScalarEpilogueNotNeededUsePredicate; 10089 case LoopVectorizeHints::FK_Disabled: 10090 return CM_ScalarEpilogueAllowed; 10091 }; 10092 10093 // 4) if the TTI hook indicates this is profitable, request predication. 10094 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10095 LVL.getLAI())) 10096 return CM_ScalarEpilogueNotNeededUsePredicate; 10097 10098 return CM_ScalarEpilogueAllowed; 10099 } 10100 10101 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10102 // If Values have been set for this Def return the one relevant for \p Part. 10103 if (hasVectorValue(Def, Part)) 10104 return Data.PerPartOutput[Def][Part]; 10105 10106 if (!hasScalarValue(Def, {Part, 0})) { 10107 Value *IRV = Def->getLiveInIRValue(); 10108 Value *B = ILV->getBroadcastInstrs(IRV); 10109 set(Def, B, Part); 10110 return B; 10111 } 10112 10113 Value *ScalarValue = get(Def, {Part, 0}); 10114 // If we aren't vectorizing, we can just copy the scalar map values over 10115 // to the vector map. 10116 if (VF.isScalar()) { 10117 set(Def, ScalarValue, Part); 10118 return ScalarValue; 10119 } 10120 10121 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10122 bool IsUniform = RepR && RepR->isUniform(); 10123 10124 unsigned LastLane = IsUniform ? 
0 : VF.getKnownMinValue() - 1; 10125 // Check if there is a scalar value for the selected lane. 10126 if (!hasScalarValue(Def, {Part, LastLane})) { 10127 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 10128 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) || 10129 isa<VPScalarIVStepsRecipe>(Def->getDef())) && 10130 "unexpected recipe found to be invariant"); 10131 IsUniform = true; 10132 LastLane = 0; 10133 } 10134 10135 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10136 // Set the insert point after the last scalarized instruction or after the 10137 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10138 // will directly follow the scalar definitions. 10139 auto OldIP = Builder.saveIP(); 10140 auto NewIP = 10141 isa<PHINode>(LastInst) 10142 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10143 : std::next(BasicBlock::iterator(LastInst)); 10144 Builder.SetInsertPoint(&*NewIP); 10145 10146 // However, if we are vectorizing, we need to construct the vector values. 10147 // If the value is known to be uniform after vectorization, we can just 10148 // broadcast the scalar value corresponding to lane zero for each unroll 10149 // iteration. Otherwise, we construct the vector values using 10150 // insertelement instructions. Since the resulting vectors are stored in 10151 // State, we will only generate the insertelements once. 10152 Value *VectorValue = nullptr; 10153 if (IsUniform) { 10154 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10155 set(Def, VectorValue, Part); 10156 } else { 10157 // Initialize packing with insertelements to start from undef. 10158 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10159 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10160 set(Def, Undef, Part); 10161 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10162 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10163 VectorValue = get(Def, Part); 10164 } 10165 Builder.restoreIP(OldIP); 10166 return VectorValue; 10167 } 10168 10169 // Process the loop in the VPlan-native vectorization path. This path builds 10170 // VPlan upfront in the vectorization pipeline, which allows to apply 10171 // VPlan-to-VPlan transformations from the very beginning without modifying the 10172 // input LLVM IR. 10173 static bool processLoopInVPlanNativePath( 10174 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10175 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10176 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10177 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10178 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10179 LoopVectorizationRequirements &Requirements) { 10180 10181 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10182 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10183 return false; 10184 } 10185 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10186 Function *F = L->getHeader()->getParent(); 10187 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10188 10189 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10190 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10191 10192 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10193 &Hints, IAI); 10194 // Use the planner for outer loop vectorization. 10195 // TODO: CM is not used at this point inside the planner. 
Turn CM into an 10196 // optional argument if we don't need it in the future. 10197 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10198 Requirements, ORE); 10199 10200 // Get user vectorization factor. 10201 ElementCount UserVF = Hints.getWidth(); 10202 10203 CM.collectElementTypesForWidening(); 10204 10205 // Plan how to best vectorize, return the best VF and its cost. 10206 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10207 10208 // If we are stress testing VPlan builds, do not attempt to generate vector 10209 // code. Masked vector code generation support will follow soon. 10210 // Also, do not attempt to vectorize if no vector code will be produced. 10211 if (VPlanBuildStressTest || EnableVPlanPredication || 10212 VectorizationFactor::Disabled() == VF) 10213 return false; 10214 10215 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10216 10217 { 10218 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10219 F->getParent()->getDataLayout()); 10220 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10221 &CM, BFI, PSI, Checks); 10222 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10223 << L->getHeader()->getParent()->getName() << "\"\n"); 10224 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10225 } 10226 10227 // Mark the loop as already vectorized to avoid vectorizing again. 10228 Hints.setAlreadyVectorized(); 10229 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10230 return true; 10231 } 10232 10233 // Emit a remark if there are stores to floats that required a floating point 10234 // extension. If the vectorized loop was generated with floating point there 10235 // will be a performance penalty from the conversion overhead and the change in 10236 // the vector width. 10237 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10238 SmallVector<Instruction *, 4> Worklist; 10239 for (BasicBlock *BB : L->getBlocks()) { 10240 for (Instruction &Inst : *BB) { 10241 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10242 if (S->getValueOperand()->getType()->isFloatTy()) 10243 Worklist.push_back(S); 10244 } 10245 } 10246 } 10247 10248 // Traverse the floating point stores upwards searching, for floating point 10249 // conversions. 10250 SmallPtrSet<const Instruction *, 4> Visited; 10251 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10252 while (!Worklist.empty()) { 10253 auto *I = Worklist.pop_back_val(); 10254 if (!L->contains(I)) 10255 continue; 10256 if (!Visited.insert(I).second) 10257 continue; 10258 10259 // Emit a remark if the floating point store required a floating 10260 // point conversion. 10261 // TODO: More work could be done to identify the root cause such as a 10262 // constant or a function return type and point the user to it. 10263 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10264 ORE->emit([&]() { 10265 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10266 I->getDebugLoc(), L->getHeader()) 10267 << "floating point conversion changes vector width. 
" 10268 << "Mixed floating point precision requires an up/down " 10269 << "cast that will negatively impact performance."; 10270 }); 10271 10272 for (Use &Op : I->operands()) 10273 if (auto *OpI = dyn_cast<Instruction>(Op)) 10274 Worklist.push_back(OpI); 10275 } 10276 } 10277 10278 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10279 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10280 !EnableLoopInterleaving), 10281 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10282 !EnableLoopVectorization) {} 10283 10284 bool LoopVectorizePass::processLoop(Loop *L) { 10285 assert((EnableVPlanNativePath || L->isInnermost()) && 10286 "VPlan-native path is not enabled. Only process inner loops."); 10287 10288 #ifndef NDEBUG 10289 const std::string DebugLocStr = getDebugLocString(L); 10290 #endif /* NDEBUG */ 10291 10292 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10293 << L->getHeader()->getParent()->getName() << "' from " 10294 << DebugLocStr << "\n"); 10295 10296 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10297 10298 LLVM_DEBUG( 10299 dbgs() << "LV: Loop hints:" 10300 << " force=" 10301 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10302 ? "disabled" 10303 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10304 ? "enabled" 10305 : "?")) 10306 << " width=" << Hints.getWidth() 10307 << " interleave=" << Hints.getInterleave() << "\n"); 10308 10309 // Function containing loop 10310 Function *F = L->getHeader()->getParent(); 10311 10312 // Looking at the diagnostic output is the only way to determine if a loop 10313 // was vectorized (other than looking at the IR or machine code), so it 10314 // is important to generate an optimization remark for each loop. Most of 10315 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10316 // generated as OptimizationRemark and OptimizationRemarkMissed are 10317 // less verbose reporting vectorized loops and unvectorized loops that may 10318 // benefit from vectorization, respectively. 10319 10320 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10321 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10322 return false; 10323 } 10324 10325 PredicatedScalarEvolution PSE(*SE, *L); 10326 10327 // Check if it is legal to vectorize the loop. 10328 LoopVectorizationRequirements Requirements; 10329 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10330 &Requirements, &Hints, DB, AC, BFI, PSI); 10331 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10332 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10333 Hints.emitRemarkWithHints(); 10334 return false; 10335 } 10336 10337 // Check the function attributes and profiles to find out if this function 10338 // should be optimized for size. 10339 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10340 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10341 10342 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10343 // here. They may require CFG and instruction level transformations before 10344 // even evaluating whether vectorization is profitable. Since we cannot modify 10345 // the incoming IR, we need to build VPlan upfront in the vectorization 10346 // pipeline. 
10347 if (!L->isInnermost()) 10348 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10349 ORE, BFI, PSI, Hints, Requirements); 10350 10351 assert(L->isInnermost() && "Inner loop expected."); 10352 10353 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10354 // count by optimizing for size, to minimize overheads. 10355 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10356 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10357 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10358 << "This loop is worth vectorizing only if no scalar " 10359 << "iteration overheads are incurred."); 10360 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10361 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10362 else { 10363 LLVM_DEBUG(dbgs() << "\n"); 10364 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10365 } 10366 } 10367 10368 // Check the function attributes to see if implicit floats are allowed. 10369 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10370 // an integer loop and the vector instructions selected are purely integer 10371 // vector instructions? 10372 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10373 reportVectorizationFailure( 10374 "Can't vectorize when the NoImplicitFloat attribute is used", 10375 "loop not vectorized due to NoImplicitFloat attribute", 10376 "NoImplicitFloat", ORE, L); 10377 Hints.emitRemarkWithHints(); 10378 return false; 10379 } 10380 10381 // Check if the target supports potentially unsafe FP vectorization. 10382 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10383 // for the target we're vectorizing for, to make sure none of the 10384 // additional fp-math flags can help. 10385 if (Hints.isPotentiallyUnsafe() && 10386 TTI->isFPVectorizationPotentiallyUnsafe()) { 10387 reportVectorizationFailure( 10388 "Potentially unsafe FP op prevents vectorization", 10389 "loop not vectorized due to unsafe FP support.", 10390 "UnsafeFP", ORE, L); 10391 Hints.emitRemarkWithHints(); 10392 return false; 10393 } 10394 10395 bool AllowOrderedReductions; 10396 // If the flag is set, use that instead and override the TTI behaviour. 10397 if (ForceOrderedReductions.getNumOccurrences() > 0) 10398 AllowOrderedReductions = ForceOrderedReductions; 10399 else 10400 AllowOrderedReductions = TTI->enableOrderedReductions(); 10401 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10402 ORE->emit([&]() { 10403 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10404 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10405 ExactFPMathInst->getDebugLoc(), 10406 ExactFPMathInst->getParent()) 10407 << "loop not vectorized: cannot prove it is safe to reorder " 10408 "floating-point operations"; 10409 }); 10410 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10411 "reorder floating-point operations\n"); 10412 Hints.emitRemarkWithHints(); 10413 return false; 10414 } 10415 10416 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10417 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10418 10419 // If an override option has been passed in for interleaved accesses, use it. 10420 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10421 UseInterleaved = EnableInterleavedMemAccesses; 10422 10423 // Analyze interleaved memory accesses. 
10424 if (UseInterleaved) { 10425 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10426 } 10427 10428 // Use the cost model. 10429 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10430 F, &Hints, IAI); 10431 CM.collectValuesToIgnore(); 10432 CM.collectElementTypesForWidening(); 10433 10434 // Use the planner for vectorization. 10435 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10436 Requirements, ORE); 10437 10438 // Get user vectorization factor and interleave count. 10439 ElementCount UserVF = Hints.getWidth(); 10440 unsigned UserIC = Hints.getInterleave(); 10441 10442 // Plan how to best vectorize, return the best VF and its cost. 10443 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10444 10445 VectorizationFactor VF = VectorizationFactor::Disabled(); 10446 unsigned IC = 1; 10447 10448 if (MaybeVF) { 10449 VF = *MaybeVF; 10450 // Select the interleave count. 10451 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10452 } 10453 10454 // Identify the diagnostic messages that should be produced. 10455 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10456 bool VectorizeLoop = true, InterleaveLoop = true; 10457 if (VF.Width.isScalar()) { 10458 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10459 VecDiagMsg = std::make_pair( 10460 "VectorizationNotBeneficial", 10461 "the cost-model indicates that vectorization is not beneficial"); 10462 VectorizeLoop = false; 10463 } 10464 10465 if (!MaybeVF && UserIC > 1) { 10466 // Tell the user interleaving was avoided up-front, despite being explicitly 10467 // requested. 10468 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10469 "interleaving should be avoided up front\n"); 10470 IntDiagMsg = std::make_pair( 10471 "InterleavingAvoided", 10472 "Ignoring UserIC, because interleaving was avoided up front"); 10473 InterleaveLoop = false; 10474 } else if (IC == 1 && UserIC <= 1) { 10475 // Tell the user interleaving is not beneficial. 10476 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 10477 IntDiagMsg = std::make_pair( 10478 "InterleavingNotBeneficial", 10479 "the cost-model indicates that interleaving is not beneficial"); 10480 InterleaveLoop = false; 10481 if (UserIC == 1) { 10482 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 10483 IntDiagMsg.second += 10484 " and is explicitly disabled or interleave count is set to 1"; 10485 } 10486 } else if (IC > 1 && UserIC == 1) { 10487 // Tell the user interleaving is beneficial, but it is explicitly disabled. 10488 LLVM_DEBUG( 10489 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 10490 IntDiagMsg = std::make_pair( 10491 "InterleavingBeneficialButDisabled", 10492 "the cost-model indicates that interleaving is beneficial " 10493 "but is explicitly disabled or interleave count is set to 1"); 10494 InterleaveLoop = false; 10495 } 10496 10497 // Override IC if user provided an interleave count. 10498 IC = UserIC > 0 ? UserIC : IC; 10499 10500 // Emit diagnostic messages, if any. 10501 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 10502 if (!VectorizeLoop && !InterleaveLoop) { 10503 // Do not vectorize or interleave the loop.
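    // Emit both missed-optimization remarks below so the user can see why
    // neither vectorization nor interleaving was applied.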
10504 ORE->emit([&]() { 10505 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10506 L->getStartLoc(), L->getHeader()) 10507 << VecDiagMsg.second; 10508 }); 10509 ORE->emit([&]() { 10510 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10511 L->getStartLoc(), L->getHeader()) 10512 << IntDiagMsg.second; 10513 }); 10514 return false; 10515 } else if (!VectorizeLoop && InterleaveLoop) { 10516 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10517 ORE->emit([&]() { 10518 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10519 L->getStartLoc(), L->getHeader()) 10520 << VecDiagMsg.second; 10521 }); 10522 } else if (VectorizeLoop && !InterleaveLoop) { 10523 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10524 << ") in " << DebugLocStr << '\n'); 10525 ORE->emit([&]() { 10526 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10527 L->getStartLoc(), L->getHeader()) 10528 << IntDiagMsg.second; 10529 }); 10530 } else if (VectorizeLoop && InterleaveLoop) { 10531 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10532 << ") in " << DebugLocStr << '\n'); 10533 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10534 } 10535 10536 bool DisableRuntimeUnroll = false; 10537 MDNode *OrigLoopID = L->getLoopID(); 10538 { 10539 // Optimistically generate runtime checks. Drop them if they turn out not to 10540 // be profitable. Limit the scope of Checks, so the cleanup happens 10541 // immediately after vector code generation is done. 10542 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10543 F->getParent()->getDataLayout()); 10544 if (!VF.Width.isScalar() || IC > 1) 10545 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate()); 10546 10547 using namespace ore; 10548 if (!VectorizeLoop) { 10549 assert(IC > 1 && "interleave count should not be 1 or 0"); 10550 // If we decided that it is not profitable to vectorize the loop, then 10551 // interleave it. 10552 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10553 &CM, BFI, PSI, Checks); 10554 10555 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10556 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10557 10558 ORE->emit([&]() { 10559 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10560 L->getHeader()) 10561 << "interleaved loop (interleaved count: " 10562 << NV("InterleaveCount", IC) << ")"; 10563 }); 10564 } else { 10565 // If we decided that it is *worthwhile* to vectorize the loop, then do it. 10566 10567 // Consider vectorizing the epilogue too if it's profitable. 10568 VectorizationFactor EpilogueVF = 10569 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10570 if (EpilogueVF.Width.isVector()) { 10571 10572 // The first pass vectorizes the main loop and creates a scalar epilogue 10573 // to be vectorized by executing the plan (potentially with a different 10574 // factor) again shortly afterwards. 10575 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10576 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10577 EPI, &LVL, &CM, BFI, PSI, Checks); 10578 10579 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10580 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10581 DT); 10582 ++LoopsVectorized; 10583 10584 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10585 formLCSSARecursively(*L, *DT, LI, SE); 10586 10587 // Second pass vectorizes the epilogue and adjusts the control flow 10588 // edges from the first pass.
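        // For example, a main loop vectorized with VF = 8 may be followed by an
        // epilogue loop vectorized with VF = 4 (and UF = 1) that runs before
        // control reaches the scalar remainder loop.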
10589 EPI.MainLoopVF = EPI.EpilogueVF; 10590 EPI.MainLoopUF = EPI.EpilogueUF; 10591 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10592 ORE, EPI, &LVL, &CM, BFI, PSI, 10593 Checks); 10594 10595 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10596 10597 // Ensure that the start values for any VPReductionPHIRecipes are 10598 // updated before vectorising the epilogue loop. 10599 VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock(); 10600 for (VPRecipeBase &R : Header->phis()) { 10601 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10602 if (auto *Resume = MainILV.getReductionResumeValue( 10603 ReductionPhi->getRecurrenceDescriptor())) { 10604 VPValue *StartVal = new VPValue(Resume); 10605 BestEpiPlan.addExternalDef(StartVal); 10606 ReductionPhi->setOperand(0, StartVal); 10607 } 10608 } 10609 } 10610 10611 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10612 DT); 10613 ++LoopsEpilogueVectorized; 10614 10615 if (!MainILV.areSafetyChecksAdded()) 10616 DisableRuntimeUnroll = true; 10617 } else { 10618 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10619 &LVL, &CM, BFI, PSI, Checks); 10620 10621 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10622 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10623 ++LoopsVectorized; 10624 10625 // Add metadata to disable runtime unrolling a scalar loop when there 10626 // are no runtime checks about strides and memory. A scalar loop that is 10627 // rarely used is not worth unrolling. 10628 if (!LB.areSafetyChecksAdded()) 10629 DisableRuntimeUnroll = true; 10630 } 10631 // Report the vectorization decision. 10632 ORE->emit([&]() { 10633 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10634 L->getHeader()) 10635 << "vectorized loop (vectorization width: " 10636 << NV("VectorizationFactor", VF.Width) 10637 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10638 }); 10639 } 10640 10641 if (ORE->allowExtraAnalysis(LV_NAME)) 10642 checkMixedPrecision(L, ORE); 10643 } 10644 10645 Optional<MDNode *> RemainderLoopID = 10646 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10647 LLVMLoopVectorizeFollowupEpilogue}); 10648 if (RemainderLoopID.hasValue()) { 10649 L->setLoopID(RemainderLoopID.getValue()); 10650 } else { 10651 if (DisableRuntimeUnroll) 10652 AddRuntimeUnrollDisableMetaData(L); 10653 10654 // Mark the loop as already vectorized to avoid vectorizing again. 10655 Hints.setAlreadyVectorized(); 10656 } 10657 10658 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10659 return true; 10660 } 10661 10662 LoopVectorizeResult LoopVectorizePass::runImpl( 10663 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10664 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10665 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10666 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10667 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10668 SE = &SE_; 10669 LI = &LI_; 10670 TTI = &TTI_; 10671 DT = &DT_; 10672 BFI = &BFI_; 10673 TLI = TLI_; 10674 AA = &AA_; 10675 AC = &AC_; 10676 GetLAA = &GetLAA_; 10677 DB = &DB_; 10678 ORE = &ORE_; 10679 PSI = PSI_; 10680 10681 // Don't attempt if 10682 // 1. the target claims to have no vector registers, and 10683 // 2. interleaving won't help ILP. 
10684 // 10685 // The second condition is necessary because, even if the target has no 10686 // vector registers, loop vectorization may still enable scalar 10687 // interleaving. 10688 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10689 TTI->getMaxInterleaveFactor(1) < 2) 10690 return LoopVectorizeResult(false, false); 10691 10692 bool Changed = false, CFGChanged = false; 10693 10694 // The vectorizer requires loops to be in simplified form. 10695 // Since simplification may add new inner loops, it has to run before the 10696 // legality and profitability checks. This means running the loop vectorizer 10697 // will simplify all loops, regardless of whether anything ends up being 10698 // vectorized. 10699 for (auto &L : *LI) 10700 Changed |= CFGChanged |= 10701 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10702 10703 // Build up a worklist of inner-loops to vectorize. This is necessary as 10704 // the act of vectorizing or partially unrolling a loop creates new loops 10705 // and can invalidate iterators across the loops. 10706 SmallVector<Loop *, 8> Worklist; 10707 10708 for (Loop *L : *LI) 10709 collectSupportedLoops(*L, LI, ORE, Worklist); 10710 10711 LoopsAnalyzed += Worklist.size(); 10712 10713 // Now walk the identified inner loops. 10714 while (!Worklist.empty()) { 10715 Loop *L = Worklist.pop_back_val(); 10716 10717 // For the inner loops we actually process, form LCSSA to simplify the 10718 // transform. 10719 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10720 10721 Changed |= CFGChanged |= processLoop(L); 10722 } 10723 10724 // Process each loop nest in the function. 10725 return LoopVectorizeResult(Changed, CFGChanged); 10726 } 10727 10728 PreservedAnalyses LoopVectorizePass::run(Function &F, 10729 FunctionAnalysisManager &AM) { 10730 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10731 auto &LI = AM.getResult<LoopAnalysis>(F); 10732 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10733 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10734 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10735 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10736 auto &AA = AM.getResult<AAManager>(F); 10737 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10738 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10739 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10740 10741 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10742 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10743 [&](Loop &L) -> const LoopAccessInfo & { 10744 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10745 TLI, TTI, nullptr, nullptr, nullptr}; 10746 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10747 }; 10748 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10749 ProfileSummaryInfo *PSI = 10750 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10751 LoopVectorizeResult Result = 10752 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10753 if (!Result.MadeAnyChange) 10754 return PreservedAnalyses::all(); 10755 PreservedAnalyses PA; 10756 10757 // We currently do not preserve loopinfo/dominator analyses with outer loop 10758 // vectorization. Until this is addressed, mark these analyses as preserved 10759 // only for the non-VPlan-native path. 10760 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10761 if (!EnableVPlanNativePath) { 10762 PA.preserve<LoopAnalysis>(); 10763 PA.preserve<DominatorTreeAnalysis>(); 10764 } 10765 10766 if (Result.MadeCFGChange) { 10767 // Making CFG changes likely means a loop got vectorized. Indicate that 10768 // extra simplification passes should be run. 10769 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only 10770 // be run if runtime checks have been added. 10771 AM.getResult<ShouldRunExtraVectorPasses>(F); 10772 PA.preserve<ShouldRunExtraVectorPasses>(); 10773 } else { 10774 PA.preserveSet<CFGAnalyses>(); 10775 } 10776 return PA; 10777 } 10778 10779 void LoopVectorizePass::printPipeline( 10780 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 10781 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline( 10782 OS, MapClassName2PassName); 10783 10784 OS << "<"; 10785 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;"; 10786 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;"; 10787 OS << ">"; 10788 } 10789
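// Note: with the default options, printPipeline above emits something like
// "loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>",
// e.g. as seen in -print-pipeline-passes output.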