//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
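//
// As an example of the transformation performed by this pass: with a
// vectorization factor (VF) of 4, a scalar loop such as
//
//   for (i = 0; i < n; i++)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten so that the induction variable is incremented by
// 4 and each 'wide' iteration loads, adds and stores four consecutive
// elements using vector instructions; any remaining (n % 4) iterations are
// left to a scalar remainder loop.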
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and the enum below lists the
// options. I.e., the vectorizer will try to fold the tail-loop (epilogue)
// into the vector body and predicate the instructions accordingly. If
// tail-folding fails, there are
// different fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor, which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then it uses the class
  /// member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Introduce a conditional branch (on true, condition to be set later) at the
  /// end of the header=latch connecting it to itself (across the backedge) and
  /// to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison. Those
  /// recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(
      BasicBlock *Bypass,
      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Set up cost-based decisions for the user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }
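  // A sketch of how typical access patterns map to decisions, assuming a
  // canonical induction variable i (the actual choice is cost-driven):
  //   a[i]    -> CM_Widen          (consecutive, stride +1)
  //   a[N-i]  -> CM_Widen_Reverse  (consecutive, stride -1)
  //   a[b[i]] -> CM_GatherScatter  (if the target supports gather/scatter)
  //   a[3*i]  -> CM_Interleave or CM_Scalarize, depending on cost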
  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }
  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V,
                              ElementCount VF = ElementCount::getFixed(1)) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getLoadStoreType(V);
    Align Align = getLoadStoreAlignment(V);
    if (VF.isVector())
      Ty = VectorType::get(Ty, VF);
    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
           (SI && TTI.isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) const {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication when vectorizing \p I with vectorization factor \p VF. Such
  /// instructions include conditional stores and instructions that may divide
  /// by zero.
  bool isScalarWithPredication(Instruction *I, ElementCount VF) const;

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // \p VF is the vectorization factor that will be used to vectorize \p I.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I, ElementCount VF,
                        bool IsKnownUniform = false) {
    // When we know the load is uniform and the original scalar loop was not
    // predicated we don't need to mark it as a predicated instruction. Any
    // vectorised blocks created when tail-folding are something artificial we
    // have introduced and we know there is always at least one active lane.
    // That's why we call Legal->blockNeedsPredication here because it doesn't
    // query tail-folding.
    if (IsKnownUniform && isa<LoadInst>(I) &&
        !Legal->blockNeedsPredication(I->getParent()))
      return false;
    if (!blockNeedsPredicationForAnyReason(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I, VF);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }
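  // For illustration (hypothetical loop): accesses a[3*i] and a[3*i+2] form
  // a factor-3 interleave group with a gap at member index 1. Loading a full
  // wide vector for the last group may read past the scalar loop's memory
  // footprint, which is one reason requiresScalarEpilogue() below can return
  // true for vector VFs.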
  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// scalar iterations.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate the cost of an intrinsic call instruction CI if it were
  /// vectorized with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate the cost of a call instruction CI if it were vectorized with
  /// factor VF. Return the cost of the instruction, including scalarization
  /// overhead if it's needed. The flag NeedToScalarize shows if the call needs
  /// to be scalarized, i.e. either a vector version isn't available or it is
  /// too expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// Convenience function that returns the value of vscale_range iff
  /// vscale_range.min == vscale_range.max, or otherwise returns the value
  /// returned by the corresponding TTI method.
  Optional<unsigned> getVScaleForTuning() const;
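  // For example, with the IR function attribute vscale_range(2,2) this
  // returns 2 directly; with vscale_range(1,16) the bounds disagree, so the
  // TTI hook is consulted instead.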
  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF,
                                       bool FoldTailByMasking);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate the vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);

  /// Map of scalar integer values to the smallest bitwidth they can be
  /// legally represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or isn't divisible by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup of the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. collectLoopScalars should only add non-uniform
  /// nodes to the list if they are used by a load/store instruction that is
  /// marked as CM_Scalarize. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to
  /// an iteration of the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps the cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// All element types found in the loop.
  SmallPtrSet<Type *, 16> ElementTypesInLoop;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};
} // end namespace llvm

/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimation of their cost, and are un-linked from the existing IR. After
/// deciding to vectorize, the checks are moved back. If deciding not to
/// vectorize, the temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr, either no SCEV checks have been generated or they have been
  /// used.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr, either no memory runtime checks have been generated or
  /// they have been used.
  Value *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVPredicate &Pred) {

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
    if (!Pred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT,
                                  LI, nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &Pred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      MemRuntimeCheckCond =
          addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                           RtPtrChecking.getChecks(), MemCheckExp);
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary blocks with the checks, update various places
    // accordingly.
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }
  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
    if (!SCEVCheckCond)
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
      // Memory runtime check generation creates compares that use expanded
      // values. Remove them before running the SCEVExpanderCleaners.
      for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
        if (MemCheckExp.isInsertedInstruction(&I))
          continue;
        SE.forgetValue(&I);
        I.eraseFromParent();
      }
    }
    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

    if (SCEVCheckCond)
      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
  }

  /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
                             BasicBlock *LoopVectorPreHeader,
                             BasicBlock *LoopExitBlock) {
    if (!SCEVCheckCond)
      return nullptr;
    if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
      if (C->isZero())
        return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();

    BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Create new preheader for vector loop.
    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    DT->addNewBlock(SCEVCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);

    ReplaceInstWithInst(
        SCEVCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
    // Mark the check as used, to prevent it from being removed during cleanup.
    SCEVCheckCond = nullptr;
    return SCEVCheckBlock;
  }

  /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
                                   BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks in runtime if arrays overlap.
    if (!MemRuntimeCheckCond)
      return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                MemCheckBlock);

    DT->addNewBlock(MemCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(MemCheckBlock, *LI);

    ReplaceInstWithInst(
        MemCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    // Mark the check as used, to prevent it from being removed during cleanup.
    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
  }
};
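// A sketch of the intended lifecycle (using only the methods declared above):
// the vectorizer constructs a GeneratedRTChecks and calls Create() up front
// so the cost of the checks can be estimated; only if it decides to vectorize
// does it re-link the blocks via emitSCEVChecks() / emitMemRuntimeChecks().
// Otherwise the destructor erases the unused check blocks.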
// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legal checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}
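// For illustration (hypothetical source), the VPlan-native path would collect
// an outer loop annotated like:
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < M; ++i)   // outer loop, explicitly annotated
//     for (int j = 0; j < N; ++j) // inner loop
//       A[i][j] = 0;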
namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE, PSI).MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is relevant for FP induction variables.
static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
                            Instruction::BinaryOps BinOp, ElementCount VF,
                            IRBuilderBase &Builder) {
  assert(VF.isVector() && "only vector VFs are supported");

  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  VectorType *InitVecValVTy = ValVTy;
  if (STy->isFloatingPointTy()) {
    Type *InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Splat the StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);

  if (STy->isIntegerTy()) {
    InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating-point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);

  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}
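// For example, with VF = 4, Val = splat(%x), StartIdx = 0 and Step = %s, the
// integer path above produces the vector
//   <%x + 0*%s, %x + 1*%s, %x + 2*%s, %x + 3*%s>.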
/// Compute scalar induction steps. \p ScalarIV is the scalar induction
/// variable on which to base the steps, \p Step is the size of the step.
static void buildScalarSteps(Value *ScalarIV, Value *Step,
                             const InductionDescriptor &ID, VPValue *Def,
                             VPTransformState &State) {
  IRBuilderBase &Builder = State.Builder;
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration.
  bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
  unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!FirstLaneOnly && State.VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, State.VF);
    UnitStepVec =
        Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
    SplatStep = Builder.CreateVectorSplat(State.VF, Step);
    SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
  }

  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);

    if (!FirstLaneOnly && State.VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(Def, Add, Part);
      // It's useful to record the lane values too for the known minimum
      // number of elements so we do those below. This improves the code
      // quality when trying to extract the first element, for example.
    }

    if (ScalarIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);

    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
      auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
      State.set(Def, Add, VPIteration(Part, Lane));
    }
  }
}
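// For example, with a fixed VF = 4, UF = 2 and an integer induction %iv with
// step %s, part 0 produces lanes %iv + (0..3)*%s and part 1 produces lanes
// %iv + (4..7)*%s; for scalable VFs, the per-part start index StartIdx0 is
// instead computed at runtime from vscale by createStepForVF.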
// Generate code for the induction step. Note that induction steps are
// required to be loop-invariant.
static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
                              Instruction *InsertBefore,
                              Loop *OrigLoop = nullptr) {
  const DataLayout &DL = SE.getDataLayout();
  assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
         "Induction step should be loop invariant");
  if (auto *E = dyn_cast<SCEVUnknown>(Step))
    return E->getValue();

  SCEVExpander Exp(SE, DL, "induction");
  return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
}

/// Compute the transformed value of Index at offset StartValue using step
/// StepValue.
/// For integer induction, returns StartValue + Index * StepValue.
/// For pointer induction, returns StartValue[Index * StepValue].
/// FIXME: The newly created binary instructions should contain nsw/nuw
/// flags, which can be found from the original scalar operations.
static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
                                   Value *StartValue, Value *Step,
                                   const InductionDescriptor &ID) {
  assert(Index->getType()->getScalarType() == Step->getType() &&
         "Index scalar type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, an attempt at doing so on invalid IR
  // may lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };
  // We allow X to be a vector type, in which case Y will potentially be
  // splatted into a vector with the same element count.
  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
  };

  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for integer inductions yet");
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(Index, Step);
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<Constant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(ID.getElementType(), StartValue,
                       CreateMul(Index, Step));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for FP inductions yet");
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *MulExp = B.CreateFMul(Step, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                         "induction");
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst,
      Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
  State.set(Def, VectorValue, Instance.Part);
}

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ... // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//       <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec           ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getLoadStoreType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(AddrPart);

    // Note that the current instruction could be a member at any index. We
    // need to adjust the address to that of the member at index 0.
    //
    // E.g. a = A[i+1]; // Member of index 1 (current instruction)
    //      b = A[i];   // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a; // Member of index 1
    //      A[i]   = b; // Member of index 0
    //      A[i+2] = c; // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");

        State.set(VPDefs[J], StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The sub vector type for the current instruction.
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
  assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
         "masked interleaved groups are not allowed.");
  assert((!MaskForGaps || !VF.isScalable()) &&
         "masking gaps for scalable vectors is not yet supported.");
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
2720 SmallVector<Value *, 4> StoredVecs; 2721 for (unsigned i = 0; i < InterleaveFactor; i++) { 2722 assert((Group->getMember(i) || MaskForGaps) && 2723 "Fail to get a member from an interleaved store group"); 2724 Instruction *Member = Group->getMember(i); 2725 2726 // Skip the gaps in the group. 2727 if (!Member) { 2728 Value *Undef = PoisonValue::get(SubVT); 2729 StoredVecs.push_back(Undef); 2730 continue; 2731 } 2732 2733 Value *StoredVec = State.get(StoredValues[i], Part); 2734 2735 if (Group->isReverse()) 2736 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2737 2738 // If this member has different type, cast it to a unified type. 2739 2740 if (StoredVec->getType() != SubVT) 2741 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2742 2743 StoredVecs.push_back(StoredVec); 2744 } 2745 2746 // Concatenate all vectors into a wide vector. 2747 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2748 2749 // Interleave the elements in the wide vector. 2750 Value *IVec = Builder.CreateShuffleVector( 2751 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2752 "interleaved.vec"); 2753 2754 Instruction *NewStoreInstr; 2755 if (BlockInMask || MaskForGaps) { 2756 Value *GroupMask = MaskForGaps; 2757 if (BlockInMask) { 2758 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2759 Value *ShuffledMask = Builder.CreateShuffleVector( 2760 BlockInMaskPart, 2761 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2762 "interleaved.mask"); 2763 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2764 ShuffledMask, MaskForGaps) 2765 : ShuffledMask; 2766 } 2767 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2768 Group->getAlign(), GroupMask); 2769 } else 2770 NewStoreInstr = 2771 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2772 2773 Group->addMetadata(NewStoreInstr); 2774 } 2775 } 2776 2777 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2778 VPReplicateRecipe *RepRecipe, 2779 const VPIteration &Instance, 2780 bool IfPredicateInstr, 2781 VPTransformState &State) { 2782 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2783 2784 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2785 // the first lane and part. 2786 if (isa<NoAliasScopeDeclInst>(Instr)) 2787 if (!Instance.isFirstIteration()) 2788 return; 2789 2790 setDebugLocFromInst(Instr); 2791 2792 // Does this instruction return a value ? 2793 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2794 2795 Instruction *Cloned = Instr->clone(); 2796 if (!IsVoidRetTy) 2797 Cloned->setName(Instr->getName() + ".cloned"); 2798 2799 // If the scalarized instruction contributes to the address computation of a 2800 // widen masked load/store which was in a basic block that needed predication 2801 // and is not predicated after vectorization, we can't propagate 2802 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2803 // instruction could feed a poison value to the base address of the widen 2804 // load/store. 2805 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2806 Cloned->dropPoisonGeneratingFlags(); 2807 2808 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 2809 Builder.GetInsertPoint()); 2810 // Replace the operands of the cloned instructions with their scalar 2811 // equivalents in the new loop. 
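// Operands produced by a uniform replicate recipe only have a lane-0 value
// per unroll part, so for those we fall back to lane 0 of the requested part
// in the loop below.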
2812 for (auto &I : enumerate(RepRecipe->operands())) { 2813 auto InputInstance = Instance; 2814 VPValue *Operand = I.value(); 2815 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2816 if (OperandR && OperandR->isUniform()) 2817 InputInstance.Lane = VPLane::getFirstLane(); 2818 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2819 } 2820 addNewMetadata(Cloned, Instr); 2821 2822 // Place the cloned scalar in the new loop. 2823 Builder.Insert(Cloned); 2824 2825 State.set(RepRecipe, Cloned, Instance); 2826 2827 // If we just cloned a new assumption, add it to the assumption cache. 2828 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2829 AC->registerAssumption(II); 2830 2831 // End if-block. 2832 if (IfPredicateInstr) 2833 PredicatedInstructions.push_back(Cloned); 2834 } 2835 2836 void InnerLoopVectorizer::createHeaderBranch(Loop *L) { 2837 BasicBlock *Header = L->getHeader(); 2838 assert(!L->getLoopLatch() && "loop should not have a latch at this point"); 2839 2840 IRBuilder<> B(Header->getTerminator()); 2841 Instruction *OldInst = 2842 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 2843 setDebugLocFromInst(OldInst, &B); 2844 2845 // Connect the header to the exit and header blocks and replace the old 2846 // terminator. 2847 B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header); 2848 2849 // Now we have two terminators. Remove the old one from the block. 2850 Header->getTerminator()->eraseFromParent(); 2851 } 2852 2853 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2854 if (TripCount) 2855 return TripCount; 2856 2857 assert(InsertBlock); 2858 IRBuilder<> Builder(InsertBlock->getTerminator()); 2859 // Find the loop boundaries. 2860 ScalarEvolution *SE = PSE.getSE(); 2861 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2862 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2863 "Invalid loop count"); 2864 2865 Type *IdxTy = Legal->getWidestInductionType(); 2866 assert(IdxTy && "No type for induction"); 2867 2868 // The exit count might have type i64 while the phi has type i32. This can 2869 // happen if we have an induction variable that is sign-extended before the 2870 // compare. The only way we get a backedge-taken count in that case is if the 2871 // induction variable was signed and as such will not overflow, so 2872 // truncation is legal. 2873 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2874 IdxTy->getPrimitiveSizeInBits()) 2875 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2876 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2877 2878 // Get the total trip count from the count by adding 1. 2879 const SCEV *ExitCount = SE->getAddExpr( 2880 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2881 2882 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2883 2884 // Expand the trip count and place the new instructions in the preheader. 2885 // Notice that the pre-header does not change, only the loop body. 2886 SCEVExpander Exp(*SE, DL, "induction"); 2887 2888 // Count holds the overall loop count (N).
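// E.g. if SCEV computes the backedge-taken count as (n - 1), the expression
// expanded here is (n - 1) + 1, i.e. the full trip count n of the loop.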
2889 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2890 InsertBlock->getTerminator()); 2891 2892 if (TripCount->getType()->isPointerTy()) 2893 TripCount = 2894 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2895 InsertBlock->getTerminator()); 2896 2897 return TripCount; 2898 } 2899 2900 Value * 2901 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2902 if (VectorTripCount) 2903 return VectorTripCount; 2904 2905 Value *TC = getOrCreateTripCount(InsertBlock); 2906 IRBuilder<> Builder(InsertBlock->getTerminator()); 2907 2908 Type *Ty = TC->getType(); 2909 // This is where we can make the step a runtime constant. 2910 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2911 2912 // If the tail is to be folded by masking, round the number of iterations N 2913 // up to a multiple of Step instead of rounding down. This is done by first 2914 // adding Step-1 and then rounding down. Note that it's ok if this addition 2915 // overflows: the vector induction variable will eventually wrap to zero given 2916 // that it starts at zero and its Step is a power of two; the loop will then 2917 // exit, with the last early-exit vector comparison also producing all-true. 2918 if (Cost->foldTailByMasking()) { 2919 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2920 "VF*UF must be a power of 2 when folding tail by masking"); 2921 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2922 TC = Builder.CreateAdd( 2923 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2924 } 2925 2926 // Now we need to generate the expression for the part of the loop that the 2927 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2928 // iterations are not required for correctness, or N - Step, otherwise. Step 2929 // is equal to the vectorization factor (number of SIMD elements) times the 2930 // unroll factor (number of SIMD instructions). 2931 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2932 2933 // There are cases where we *must* run at least one iteration in the remainder 2934 // loop. See the cost model for when this can happen. If the step evenly 2935 // divides the trip count, we set the remainder to be equal to the step. If 2936 // the step does not evenly divide the trip count, no adjustment is necessary 2937 // since there will already be scalar iterations. Note that the minimum 2938 // iterations check ensures that N >= Step. 2939 if (Cost->requiresScalarEpilogue(VF)) { 2940 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2941 R = Builder.CreateSelect(IsZero, Step, R); 2942 } 2943 2944 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2945 2946 return VectorTripCount; 2947 } 2948 2949 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2950 const DataLayout &DL) { 2951 // Verify that V is a vector type with same number of elements as DstVTy. 2952 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2953 unsigned VF = DstFVTy->getNumElements(); 2954 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2955 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2956 Type *SrcElemTy = SrcVecTy->getElementType(); 2957 Type *DstElemTy = DstFVTy->getElementType(); 2958 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2959 "Vector elements must have same size"); 2960 2961 // Do a direct cast if element types are castable. 
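// E.g. <4 x i32> <-> <4 x float> is handled with a single bitcast, since the
// element sizes match.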
2962 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2963 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2964 } 2965 // V cannot be directly casted to desired vector type. 2966 // May happen when V is a floating point vector but DstVTy is a vector of 2967 // pointers or vice-versa. Handle this using a two-step bitcast using an 2968 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2969 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2970 "Only one type should be a pointer type"); 2971 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2972 "Only one type should be a floating point type"); 2973 Type *IntTy = 2974 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2975 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2976 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2977 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2978 } 2979 2980 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) { 2981 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2982 // Reuse existing vector loop preheader for TC checks. 2983 // Note that new preheader block is generated for vector loop. 2984 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2985 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2986 2987 // Generate code to check if the loop's trip count is less than VF * UF, or 2988 // equal to it in case a scalar epilogue is required; this implies that the 2989 // vector trip count is zero. This check also covers the case where adding one 2990 // to the backedge-taken count overflowed leading to an incorrect trip count 2991 // of zero. In this case we will also jump to the scalar loop. 2992 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2993 : ICmpInst::ICMP_ULT; 2994 2995 // If tail is to be folded, vector loop takes care of all iterations. 2996 Value *CheckMinIters = Builder.getFalse(); 2997 if (!Cost->foldTailByMasking()) { 2998 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 2999 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3000 } 3001 // Create new preheader for vector loop. 3002 LoopVectorPreHeader = 3003 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3004 "vector.ph"); 3005 3006 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3007 DT->getNode(Bypass)->getIDom()) && 3008 "TC check is expected to dominate Bypass"); 3009 3010 // Update dominator for Bypass & LoopExit (if needed). 3011 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3012 if (!Cost->requiresScalarEpilogue(VF)) 3013 // If there is an epilogue which must run, there's no edge from the 3014 // middle block to exit blocks and thus no need to update the immediate 3015 // dominator of the exit blocks. 
3016 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3017 3018 ReplaceInstWithInst( 3019 TCCheckBlock->getTerminator(), 3020 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3021 LoopBypassBlocks.push_back(TCCheckBlock); 3022 } 3023 3024 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3025 3026 BasicBlock *const SCEVCheckBlock = 3027 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3028 if (!SCEVCheckBlock) 3029 return nullptr; 3030 3031 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3032 (OptForSizeBasedOnProfile && 3033 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3034 "Cannot SCEV check stride or overflow when optimizing for size"); 3035 3036 3037 // Update dominator only if this is first RT check. 3038 if (LoopBypassBlocks.empty()) { 3039 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3040 if (!Cost->requiresScalarEpilogue(VF)) 3041 // If there is an epilogue which must run, there's no edge from the 3042 // middle block to exit blocks and thus no need to update the immediate 3043 // dominator of the exit blocks. 3044 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3045 } 3046 3047 LoopBypassBlocks.push_back(SCEVCheckBlock); 3048 AddedSafetyChecks = true; 3049 return SCEVCheckBlock; 3050 } 3051 3052 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) { 3053 // VPlan-native path does not do any analysis for runtime checks currently. 3054 if (EnableVPlanNativePath) 3055 return nullptr; 3056 3057 BasicBlock *const MemCheckBlock = 3058 RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader); 3059 3060 // Check if we generated code that checks in runtime if arrays overlap. We put 3061 // the checks into a separate block to make the more common case of few 3062 // elements faster. 3063 if (!MemCheckBlock) 3064 return nullptr; 3065 3066 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3067 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3068 "Cannot emit memory checks when optimizing for size, unless forced " 3069 "to vectorize."); 3070 ORE->emit([&]() { 3071 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3072 OrigLoop->getStartLoc(), 3073 OrigLoop->getHeader()) 3074 << "Code-size may be reduced by not forcing " 3075 "vectorization, or by source-code modifications " 3076 "eliminating the need for runtime checks " 3077 "(e.g., adding 'restrict')."; 3078 }); 3079 } 3080 3081 LoopBypassBlocks.push_back(MemCheckBlock); 3082 3083 AddedSafetyChecks = true; 3084 3085 // We currently don't use LoopVersioning for the actual loop cloning but we 3086 // still use it to add the noalias metadata. 
3087 LVer = std::make_unique<LoopVersioning>( 3088 *Legal->getLAI(), 3089 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3090 DT, PSE.getSE()); 3091 LVer->prepareNoAliasMetadata(); 3092 return MemCheckBlock; 3093 } 3094 3095 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3096 LoopScalarBody = OrigLoop->getHeader(); 3097 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3098 assert(LoopVectorPreHeader && "Invalid loop structure"); 3099 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3100 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3101 "multiple exit loop without required epilogue?"); 3102 3103 LoopMiddleBlock = 3104 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3105 LI, nullptr, Twine(Prefix) + "middle.block"); 3106 LoopScalarPreHeader = 3107 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3108 nullptr, Twine(Prefix) + "scalar.ph"); 3109 3110 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3111 3112 // Set up the middle block terminator. Two cases: 3113 // 1) If we know that we must execute the scalar epilogue, emit an 3114 // unconditional branch. 3115 // 2) Otherwise, we must have a single unique exit block (due to how we 3116 // implement the multiple exit case). In this case, set up a conditional 3117 // branch from the middle block to the loop scalar preheader, and the 3118 // exit block. completeLoopSkeleton will update the condition to use an 3119 // iteration check, if required to decide whether to execute the remainder. 3120 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 3121 BranchInst::Create(LoopScalarPreHeader) : 3122 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3123 Builder.getTrue()); 3124 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3125 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3126 3127 // We intentionally don't let SplitBlock update LoopInfo since 3128 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader. 3129 // LoopVectorBody is explicitly added to the correct place a few lines later. 3130 BasicBlock *LoopVectorBody = 3131 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3132 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3133 3134 // Update dominator for loop exit. 3135 if (!Cost->requiresScalarEpilogue(VF)) 3136 // If there is an epilogue which must run, there's no edge from the 3137 // middle block to exit blocks and thus no need to update the immediate 3138 // dominator of the exit blocks. 3139 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3140 3141 // Create and register the new vector loop. 3142 Loop *Lp = LI->AllocateLoop(); 3143 Loop *ParentLoop = OrigLoop->getParentLoop(); 3144 3145 // Insert the new loop into the loop nest and register the new basic blocks 3146 // before calling any utilities such as SCEV that require valid LoopInfo.
3147 if (ParentLoop) { 3148 ParentLoop->addChildLoop(Lp); 3149 } else { 3150 LI->addTopLevelLoop(Lp); 3151 } 3152 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3153 return Lp; 3154 } 3155 3156 void InnerLoopVectorizer::createInductionResumeValues( 3157 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3158 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3159 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3160 "Inconsistent information about additional bypass."); 3161 3162 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3163 assert(VectorTripCount && "Expected valid arguments"); 3164 // We are going to resume the execution of the scalar loop. 3165 // Go over all of the induction variables that we found and fix the 3166 // PHIs that are left in the scalar version of the loop. 3167 // The starting values of PHI nodes depend on the counter of the last 3168 // iteration in the vectorized loop. 3169 // If we come from a bypass edge then we need to start from the original 3170 // start value. 3171 Instruction *OldInduction = Legal->getPrimaryInduction(); 3172 for (auto &InductionEntry : Legal->getInductionVars()) { 3173 PHINode *OrigPhi = InductionEntry.first; 3174 InductionDescriptor II = InductionEntry.second; 3175 3176 // Create phi nodes to merge from the backedge-taken check block. 3177 PHINode *BCResumeVal = 3178 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3179 LoopScalarPreHeader->getTerminator()); 3180 // Copy original phi DL over to the new one. 3181 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3182 Value *&EndValue = IVEndValues[OrigPhi]; 3183 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3184 if (OrigPhi == OldInduction) { 3185 // We know what the end value is. 3186 EndValue = VectorTripCount; 3187 } else { 3188 IRBuilder<> B(LoopVectorPreHeader->getTerminator()); 3189 3190 // Fast-math-flags propagate from the original induction instruction. 3191 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3192 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3193 3194 Type *StepType = II.getStep()->getType(); 3195 Instruction::CastOps CastOp = 3196 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3197 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3198 Value *Step = 3199 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3200 EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3201 EndValue->setName("ind.end"); 3202 3203 // Compute the end value for the additional bypass (if applicable). 3204 if (AdditionalBypass.first) { 3205 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3206 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3207 StepType, true); 3208 Value *Step = 3209 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3210 CRD = 3211 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3212 EndValueFromAdditionalBypass = 3213 emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3214 EndValueFromAdditionalBypass->setName("ind.end"); 3215 } 3216 } 3217 // The new PHI merges the original incoming value, in case of a bypass, 3218 // or the value at the end of the vectorized loop. 3219 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3220 3221 // Fix the scalar body counter (PHI node). 3222 // The old induction's phi node in the scalar body needs the truncated 3223 // value. 
3224 for (BasicBlock *BB : LoopBypassBlocks) 3225 BCResumeVal->addIncoming(II.getStartValue(), BB); 3226 3227 if (AdditionalBypass.first) 3228 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3229 EndValueFromAdditionalBypass); 3230 3231 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3232 } 3233 } 3234 3235 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3236 // The trip counts should be cached by now. 3237 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3238 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3239 3240 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3241 3242 // Add a check in the middle block to see if we have completed 3243 // all of the iterations in the first vector loop. Three cases: 3244 // 1) If we require a scalar epilogue, there is no conditional branch as 3245 // we unconditionally branch to the scalar preheader. Do nothing. 3246 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3247 // Thus if tail is to be folded, we know we don't need to run the 3248 // remainder and we can use the previous value for the condition (true). 3249 // 3) Otherwise, construct a runtime check. 3250 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3251 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3252 Count, VectorTripCount, "cmp.n", 3253 LoopMiddleBlock->getTerminator()); 3254 3255 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3256 // of the corresponding compare because they may have ended up with 3257 // different line numbers and we want to avoid awkward line stepping while 3258 // debugging. Eg. if the compare has got a line number inside the loop. 3259 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3260 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3261 } 3262 3263 #ifdef EXPENSIVE_CHECKS 3264 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3265 LI->verify(*DT); 3266 #endif 3267 3268 return LoopVectorPreHeader; 3269 } 3270 3271 std::pair<BasicBlock *, Value *> 3272 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3273 /* 3274 In this function we generate a new loop. The new loop will contain 3275 the vectorized instructions while the old loop will continue to run the 3276 scalar remainder. 3277 3278 [ ] <-- loop iteration number check. 3279 / | 3280 / v 3281 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3282 | / | 3283 | / v 3284 || [ ] <-- vector pre header. 3285 |/ | 3286 | v 3287 | [ ] \ 3288 | [ ]_| <-- vector loop. 3289 | | 3290 | v 3291 \ -[ ] <--- middle-block. 3292 \/ | 3293 /\ v 3294 | ->[ ] <--- new preheader. 3295 | | 3296 (opt) v <-- edge from middle to exit iff epilogue is not required. 3297 | [ ] \ 3298 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3299 \ | 3300 \ v 3301 >[ ] <-- exit block(s). 3302 ... 3303 */ 3304 3305 // Get the metadata of the original loop before it gets modified. 3306 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3307 3308 // Workaround! Compute the trip count of the original loop and cache it 3309 // before we start modifying the CFG. This code has a systemic problem 3310 // wherein it tries to run analysis over partially constructed IR; this is 3311 // wrong, and not simply for SCEV. The trip count of the original loop 3312 // simply happens to be prone to hitting this in practice. 
In theory, we 3313 // can hit the same issue for any SCEV, or ValueTracking query done during 3314 // mutation. See PR49900. 3315 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3316 3317 // Create an empty vector loop, and prepare basic blocks for the runtime 3318 // checks. 3319 Loop *Lp = createVectorLoopSkeleton(""); 3320 3321 // Now, compare the new count to zero. If it is zero, skip the vector loop and 3322 // jump to the scalar loop. This check also covers the case where the 3323 // backedge-taken count is uint##_max: adding one to it will overflow leading 3324 // to an incorrect trip count of zero. In this (rare) case we will also jump 3325 // to the scalar loop. 3326 emitMinimumIterationCountCheck(LoopScalarPreHeader); 3327 3328 // Generate the code to check any assumptions that we've made for SCEV 3329 // expressions. 3330 emitSCEVChecks(LoopScalarPreHeader); 3331 3332 // Generate the code that checks at runtime whether arrays overlap. We put the 3333 // checks into a separate block to make the more common case of few elements 3334 // faster. 3335 emitMemRuntimeChecks(LoopScalarPreHeader); 3336 3337 createHeaderBranch(Lp); 3338 3339 // Emit phis for the new starting index of the scalar loop. 3340 createInductionResumeValues(); 3341 3342 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3343 } 3344 3345 // Fix up external users of the induction variable. At this point, we are 3346 // in LCSSA form, with all external PHIs that use the IV having one input value, 3347 // coming from the remainder loop. We need those PHIs to also have a correct 3348 // value for the IV when arriving directly from the middle block. 3349 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3350 const InductionDescriptor &II, 3351 Value *CountRoundDown, Value *EndValue, 3352 BasicBlock *MiddleBlock, 3353 BasicBlock *VectorHeader) { 3354 // There are two kinds of external IV usages - those that use the value 3355 // computed in the last iteration (the PHI) and those that use the penultimate 3356 // value (the value that feeds into the phi from the loop latch). 3357 // We allow both, but they, obviously, have different values. 3358 3359 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3360 3361 DenseMap<Value *, Value *> MissingVals; 3362 3363 // An external user of the last iteration's value should see the value that 3364 // the remainder loop uses to initialize its own IV. 3365 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3366 for (User *U : PostInc->users()) { 3367 Instruction *UI = cast<Instruction>(U); 3368 if (!OrigLoop->contains(UI)) { 3369 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3370 MissingVals[UI] = EndValue; 3371 } 3372 } 3373 3374 // An external user of the penultimate value needs to see EndValue - Step. 3375 // The simplest way to get this is to recompute it from the constituent SCEVs, 3376 // that is Start + (Step * (CRD - 1)). 3377 for (User *U : OrigPhi->users()) { 3378 auto *UI = cast<Instruction>(U); 3379 if (!OrigLoop->contains(UI)) { 3380 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3381 3382 IRBuilder<> B(MiddleBlock->getTerminator()); 3383 3384 // Fast-math-flags propagate from the original induction instruction.
3385 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3386 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3387 3388 Value *CountMinusOne = B.CreateSub( 3389 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3390 Value *CMO = 3391 !II.getStep()->getType()->isIntegerTy() 3392 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3393 II.getStep()->getType()) 3394 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3395 CMO->setName("cast.cmo"); 3396 3397 Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(), 3398 VectorHeader->getTerminator()); 3399 Value *Escape = 3400 emitTransformedIndex(B, CMO, II.getStartValue(), Step, II); 3401 Escape->setName("ind.escape"); 3402 MissingVals[UI] = Escape; 3403 } 3404 } 3405 3406 for (auto &I : MissingVals) { 3407 PHINode *PHI = cast<PHINode>(I.first); 3408 // One corner case we have to handle is two IVs "chasing" each other, 3409 // that is %IV2 = phi [...], [ %IV1, %latch ] 3410 // In this case, if IV1 has an external use, we need to avoid adding both 3411 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3412 // don't already have an incoming value for the middle block. 3413 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3414 PHI->addIncoming(I.second, MiddleBlock); 3415 } 3416 } 3417 3418 namespace { 3419 3420 struct CSEDenseMapInfo { 3421 static bool canHandle(const Instruction *I) { 3422 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3423 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3424 } 3425 3426 static inline Instruction *getEmptyKey() { 3427 return DenseMapInfo<Instruction *>::getEmptyKey(); 3428 } 3429 3430 static inline Instruction *getTombstoneKey() { 3431 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3432 } 3433 3434 static unsigned getHashValue(const Instruction *I) { 3435 assert(canHandle(I) && "Unknown instruction!"); 3436 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3437 I->value_op_end())); 3438 } 3439 3440 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3441 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3442 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3443 return LHS == RHS; 3444 return LHS->isIdenticalTo(RHS); 3445 } 3446 }; 3447 3448 } // end anonymous namespace 3449 3450 /// Perform CSE of induction variable instructions. 3451 static void cse(BasicBlock *BB) { 3452 // Perform simple CSE. 3453 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3454 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3455 if (!CSEDenseMapInfo::canHandle(&In)) 3456 continue; 3457 3458 // Check if we can replace this instruction with any of the 3459 // visited instructions. 3460 if (Instruction *V = CSEMap.lookup(&In)) { 3461 In.replaceAllUsesWith(V); 3462 In.eraseFromParent(); 3463 continue; 3464 } 3465 3466 CSEMap[&In] = &In; 3467 } 3468 } 3469 3470 InstructionCost 3471 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3472 bool &NeedToScalarize) const { 3473 Function *F = CI->getCalledFunction(); 3474 Type *ScalarRetTy = CI->getType(); 3475 SmallVector<Type *, 4> Tys, ScalarTys; 3476 for (auto &ArgOp : CI->args()) 3477 ScalarTys.push_back(ArgOp->getType()); 3478 3479 // Estimate cost of scalarized vector call.
The source operands are assumed 3480 // to be vectors, so we need to extract individual elements from there, 3481 // execute VF scalar calls, and then gather the result into the vector return 3482 // value. 3483 InstructionCost ScalarCallCost = 3484 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3485 if (VF.isScalar()) 3486 return ScalarCallCost; 3487 3488 // Compute corresponding vector type for return value and arguments. 3489 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3490 for (Type *ScalarTy : ScalarTys) 3491 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3492 3493 // Compute costs of unpacking argument values for the scalar calls and 3494 // packing the return values to a vector. 3495 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3496 3497 InstructionCost Cost = 3498 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3499 3500 // If we can't emit a vector call for this function, then the currently found 3501 // cost is the cost we need to return. 3502 NeedToScalarize = true; 3503 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3504 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3505 3506 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3507 return Cost; 3508 3509 // If the corresponding vector cost is cheaper, return its cost. 3510 InstructionCost VectorCallCost = 3511 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3512 if (VectorCallCost < Cost) { 3513 NeedToScalarize = false; 3514 Cost = VectorCallCost; 3515 } 3516 return Cost; 3517 } 3518 3519 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3520 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3521 return Elt; 3522 return VectorType::get(Elt, VF); 3523 } 3524 3525 InstructionCost 3526 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3527 ElementCount VF) const { 3528 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3529 assert(ID && "Expected intrinsic call!"); 3530 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3531 FastMathFlags FMF; 3532 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3533 FMF = FPMO->getFastMathFlags(); 3534 3535 SmallVector<const Value *> Arguments(CI->args()); 3536 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3537 SmallVector<Type *> ParamTys; 3538 std::transform(FTy->param_begin(), FTy->param_end(), 3539 std::back_inserter(ParamTys), 3540 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3541 3542 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3543 dyn_cast<IntrinsicInst>(CI)); 3544 return TTI.getIntrinsicInstrCost(CostAttrs, 3545 TargetTransformInfo::TCK_RecipThroughput); 3546 } 3547 3548 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3549 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3550 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3551 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3552 } 3553 3554 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3555 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3556 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3557 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3558 } 3559 3560 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3561 // For every instruction `I` in MinBWs, truncate the operands, create a 3562 // truncated version of `I` and reextend its result. 
InstCombine runs 3563 // later and will remove any ext/trunc pairs. 3564 SmallPtrSet<Value *, 4> Erased; 3565 for (const auto &KV : Cost->getMinimalBitwidths()) { 3566 // If the value wasn't vectorized, we must maintain the original scalar 3567 // type. The absence of the value from State indicates that it 3568 // wasn't vectorized. 3569 // FIXME: Should not rely on getVPValue at this point. 3570 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3571 if (!State.hasAnyVectorValue(Def)) 3572 continue; 3573 for (unsigned Part = 0; Part < UF; ++Part) { 3574 Value *I = State.get(Def, Part); 3575 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3576 continue; 3577 Type *OriginalTy = I->getType(); 3578 Type *ScalarTruncatedTy = 3579 IntegerType::get(OriginalTy->getContext(), KV.second); 3580 auto *TruncatedTy = VectorType::get( 3581 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3582 if (TruncatedTy == OriginalTy) 3583 continue; 3584 3585 IRBuilder<> B(cast<Instruction>(I)); 3586 auto ShrinkOperand = [&](Value *V) -> Value * { 3587 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3588 if (ZI->getSrcTy() == TruncatedTy) 3589 return ZI->getOperand(0); 3590 return B.CreateZExtOrTrunc(V, TruncatedTy); 3591 }; 3592 3593 // The actual instruction modification depends on the instruction type, 3594 // unfortunately. 3595 Value *NewI = nullptr; 3596 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3597 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3598 ShrinkOperand(BO->getOperand(1))); 3599 3600 // Any wrapping introduced by shrinking this operation shouldn't be 3601 // considered undefined behavior. So, we can't unconditionally copy 3602 // arithmetic wrapping flags to NewI. 3603 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3604 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3605 NewI = 3606 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3607 ShrinkOperand(CI->getOperand(1))); 3608 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3609 NewI = B.CreateSelect(SI->getCondition(), 3610 ShrinkOperand(SI->getTrueValue()), 3611 ShrinkOperand(SI->getFalseValue())); 3612 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3613 switch (CI->getOpcode()) { 3614 default: 3615 llvm_unreachable("Unhandled cast!"); 3616 case Instruction::Trunc: 3617 NewI = ShrinkOperand(CI->getOperand(0)); 3618 break; 3619 case Instruction::SExt: 3620 NewI = B.CreateSExtOrTrunc( 3621 CI->getOperand(0), 3622 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3623 break; 3624 case Instruction::ZExt: 3625 NewI = B.CreateZExtOrTrunc( 3626 CI->getOperand(0), 3627 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3628 break; 3629 } 3630 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3631 auto Elements0 = 3632 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3633 auto *O0 = B.CreateZExtOrTrunc( 3634 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3635 auto Elements1 = 3636 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3637 auto *O1 = B.CreateZExtOrTrunc( 3638 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3639 3640 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3641 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3642 // Don't do anything with the operands, just extend the result. 
3643 continue; 3644 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3645 auto Elements = 3646 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3647 auto *O0 = B.CreateZExtOrTrunc( 3648 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3649 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3650 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3651 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3652 auto Elements = 3653 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3654 auto *O0 = B.CreateZExtOrTrunc( 3655 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3656 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3657 } else { 3658 // If we don't know what to do, be conservative and don't do anything. 3659 continue; 3660 } 3661 3662 // Lastly, extend the result. 3663 NewI->takeName(cast<Instruction>(I)); 3664 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3665 I->replaceAllUsesWith(Res); 3666 cast<Instruction>(I)->eraseFromParent(); 3667 Erased.insert(I); 3668 State.reset(Def, Res, Part); 3669 } 3670 } 3671 3672 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3673 for (const auto &KV : Cost->getMinimalBitwidths()) { 3674 // If the value wasn't vectorized, we must maintain the original scalar 3675 // type. The absence of the value from State indicates that it 3676 // wasn't vectorized. 3677 // FIXME: Should not rely on getVPValue at this point. 3678 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3679 if (!State.hasAnyVectorValue(Def)) 3680 continue; 3681 for (unsigned Part = 0; Part < UF; ++Part) { 3682 Value *I = State.get(Def, Part); 3683 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3684 if (Inst && Inst->use_empty()) { 3685 Value *NewI = Inst->getOperand(0); 3686 Inst->eraseFromParent(); 3687 State.reset(Def, NewI, Part); 3688 } 3689 } 3690 } 3691 } 3692 3693 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3694 // Insert truncates and extends for any truncated instructions as hints to 3695 // InstCombine. 3696 if (VF.isVector()) 3697 truncateToMinimalBitwidths(State); 3698 3699 // Fix widened non-induction PHIs by setting up the PHI operands. 3700 if (OrigPHIsToFix.size()) { 3701 assert(EnableVPlanNativePath && 3702 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3703 fixNonInductionPHIs(State); 3704 } 3705 3706 // At this point every instruction in the original loop is widened to a 3707 // vector form. Now we need to fix the recurrences in the loop. These PHI 3708 // nodes are currently empty because we did not want to introduce cycles. 3709 // This is the second stage of vectorizing recurrences. 3710 fixCrossIterationPHIs(State); 3711 3712 // Forget the original basic block. 3713 PSE.getSE()->forgetLoop(OrigLoop); 3714 3715 Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB); 3716 // If we inserted an edge from the middle block to the unique exit block, 3717 // update uses outside the loop (phis) to account for the newly inserted 3718 // edge. 3719 if (!Cost->requiresScalarEpilogue(VF)) { 3720 // Fix-up external users of the induction variables. 
3721 for (auto &Entry : Legal->getInductionVars()) 3722 fixupIVUsers(Entry.first, Entry.second, 3723 getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()), 3724 IVEndValues[Entry.first], LoopMiddleBlock, 3725 VectorLoop->getHeader()); 3726 3727 fixLCSSAPHIs(State); 3728 } 3729 3730 for (Instruction *PI : PredicatedInstructions) 3731 sinkScalarOperands(&*PI); 3732 3733 // Remove redundant induction instructions. 3734 cse(VectorLoop->getHeader()); 3735 3736 // Set/update profile weights for the vector and remainder loops as the original 3737 // loop iterations are now distributed among them. Note that the original loop 3738 // represented by LoopScalarBody becomes the remainder loop after vectorization. 3739 // 3740 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may 3741 // end up getting a slightly less accurate result, but that should be OK since 3742 // profile is not inherently precise anyway. Note also that a possible bypass of 3743 // vector code caused by legality checks is ignored, assigning all the weight 3744 // to the vector loop, optimistically. 3745 // 3746 // For scalable vectorization we can't know at compile time how many iterations 3747 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3748 // vscale of '1'. 3749 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop, 3750 LI->getLoopFor(LoopScalarBody), 3751 VF.getKnownMinValue() * UF); 3752 } 3753 3754 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3755 // In order to support recurrences we need to be able to vectorize Phi nodes. 3756 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3757 // stage #2: We now need to fix the recurrences by adding incoming edges to 3758 // the currently empty PHI nodes. At this point every instruction in the 3759 // original loop is widened to a vector form so we can use them to construct 3760 // the incoming edges. 3761 VPBasicBlock *Header = 3762 State.Plan->getVectorLoopRegion()->getEntryBasicBlock(); 3763 for (VPRecipeBase &R : Header->phis()) { 3764 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3765 fixReduction(ReductionPhi, State); 3766 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3767 fixFirstOrderRecurrence(FOR, State); 3768 } 3769 } 3770 3771 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3772 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 3773 // This is the second phase of vectorizing first-order recurrences. An 3774 // overview of the transformation is described below. Suppose we have the 3775 // following loop. 3776 // 3777 // for (int i = 0; i < n; ++i) 3778 // b[i] = a[i] - a[i - 1]; 3779 // 3780 // There is a first-order recurrence on "a". For this loop, the shorthand 3781 // scalar IR looks like: 3782 // 3783 // scalar.ph: 3784 // s_init = a[-1] 3785 // br scalar.body 3786 // 3787 // scalar.body: 3788 // i = phi [0, scalar.ph], [i+1, scalar.body] 3789 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3790 // s2 = a[i] 3791 // b[i] = s2 - s1 3792 // br cond, scalar.body, ... 3793 // 3794 // In this example, s1 is a recurrence because its value depends on the 3795 // previous iteration. In the first phase of vectorization, we created a 3796 // vector phi v1 for s1. We now complete the vectorization and produce the 3797 // shorthand vector IR shown below (for VF = 4, UF = 1).
3798 // 3799 // vector.ph: 3800 // v_init = vector(..., ..., ..., a[-1]) 3801 // br vector.body 3802 // 3803 // vector.body 3804 // i = phi [0, vector.ph], [i+4, vector.body] 3805 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3806 // v2 = a[i, i+1, i+2, i+3]; 3807 // v3 = vector(v1(3), v2(0, 1, 2)) 3808 // b[i, i+1, i+2, i+3] = v2 - v3 3809 // br cond, vector.body, middle.block 3810 // 3811 // middle.block: 3812 // x = v2(3) 3813 // br scalar.ph 3814 // 3815 // scalar.ph: 3816 // s_init = phi [x, middle.block], [a[-1], otherwise] 3817 // br scalar.body 3818 // 3819 // After execution completes the vector loop, we extract the next value of 3820 // the recurrence (x) to use as the initial value in the scalar loop. 3821 3822 // Extract the last vector element in the middle block. This will be the 3823 // initial value for the recurrence when jumping to the scalar loop. 3824 VPValue *PreviousDef = PhiR->getBackedgeValue(); 3825 Value *Incoming = State.get(PreviousDef, UF - 1); 3826 auto *ExtractForScalar = Incoming; 3827 auto *IdxTy = Builder.getInt32Ty(); 3828 if (VF.isVector()) { 3829 auto *One = ConstantInt::get(IdxTy, 1); 3830 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3831 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3832 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 3833 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 3834 "vector.recur.extract"); 3835 } 3836 // Extract the second last element in the middle block if the 3837 // Phi is used outside the loop. We need to extract the phi itself 3838 // and not the last element (the phi update in the current iteration). This 3839 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3840 // when the scalar loop is not run at all. 3841 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3842 if (VF.isVector()) { 3843 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3844 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 3845 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3846 Incoming, Idx, "vector.recur.extract.for.phi"); 3847 } else if (UF > 1) 3848 // When loop is unrolled without vectorizing, initialize 3849 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 3850 // of `Incoming`. This is analogous to the vectorized case above: extracting 3851 // the second last element when VF > 1. 3852 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 3853 3854 // Fix the initial value of the original recurrence in the scalar loop. 3855 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3856 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 3857 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3858 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 3859 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3860 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3861 Start->addIncoming(Incoming, BB); 3862 } 3863 3864 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 3865 Phi->setName("scalar.recur"); 3866 3867 // Finally, fix users of the recurrence outside the loop. The users will need 3868 // either the last value of the scalar recurrence or the last value of the 3869 // vector recurrence we extracted in the middle block. Since the loop is in 3870 // LCSSA form, we just need to find all the phi nodes for the original scalar 3871 // recurrence in the exit block, and then add an edge for the middle block. 
3872 // Note that LCSSA does not imply single entry when the original scalar loop 3873 // had multiple exiting edges (as we always run the last iteration in the 3874 // scalar epilogue); in that case, there is no edge from middle to exit 3875 // and thus no phis which need to be updated. 3876 if (!Cost->requiresScalarEpilogue(VF)) 3877 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 3878 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) 3879 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 3880 } 3881 3882 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, 3883 VPTransformState &State) { 3884 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 3885 // Get its reduction variable descriptor. 3886 assert(Legal->isReductionVariable(OrigPhi) && 3887 "Unable to find the reduction variable"); 3888 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 3889 3890 RecurKind RK = RdxDesc.getRecurrenceKind(); 3891 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 3892 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 3893 setDebugLocFromInst(ReductionStartValue); 3894 3895 VPValue *LoopExitInstDef = PhiR->getBackedgeValue(); 3896 // This is the vector-clone of the value that leaves the loop. 3897 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 3898 3899 // Wrap flags are in general invalid after vectorization, clear them. 3900 clearReductionWrapFlags(RdxDesc, State); 3901 3902 // Before each round, move the insertion point right between 3903 // the PHIs and the values we are going to write. 3904 // This allows us to write both PHINodes and the extractelement 3905 // instructions. 3906 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3907 3908 setDebugLocFromInst(LoopExitInst); 3909 3910 Type *PhiTy = OrigPhi->getType(); 3911 BasicBlock *VectorLoopLatch = 3912 LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 3913 // If the tail is folded by masking, the vector value to leave the loop should be 3914 // a Select choosing between the vectorized LoopExitInst and the vectorized Phi, 3915 // instead of the former. For an inloop reduction the reduction will already 3916 // be predicated, and does not need to be handled here. 3917 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 3918 for (unsigned Part = 0; Part < UF; ++Part) { 3919 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 3920 Value *Sel = nullptr; 3921 for (User *U : VecLoopExitInst->users()) { 3922 if (isa<SelectInst>(U)) { 3923 assert(!Sel && "Reduction exit feeding two selects"); 3924 Sel = U; 3925 } else 3926 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3927 } 3928 assert(Sel && "Reduction exit feeds no select"); 3929 State.reset(LoopExitInstDef, Sel, Part); 3930 3931 // If the target can create a predicated operator for the reduction at no 3932 // extra cost in the loop (for example a predicated vadd), it can be 3933 // cheaper for the select to remain in the loop than be sunk out of it, 3934 // and so use the select value for the phi instead of the old 3935 // LoopExitValue.
3936 if (PreferPredicatedReductionSelect || 3937 TTI->preferPredicatedReductionSelect( 3938 RdxDesc.getOpcode(), PhiTy, 3939 TargetTransformInfo::ReductionFlags())) { 3940 auto *VecRdxPhi = 3941 cast<PHINode>(State.get(PhiR, Part)); 3942 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3943 } 3944 } 3945 } 3946 3947 // If the vector reduction can be performed in a smaller type, we truncate 3948 // then extend the loop exit value to enable InstCombine to evaluate the 3949 // entire expression in the smaller type. 3950 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3951 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3952 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3953 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3954 VectorParts RdxParts(UF); 3955 for (unsigned Part = 0; Part < UF; ++Part) { 3956 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3957 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3958 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3959 : Builder.CreateZExt(Trunc, VecTy); 3960 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3961 if (U != Trunc) { 3962 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3963 RdxParts[Part] = Extnd; 3964 } 3965 } 3966 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3967 for (unsigned Part = 0; Part < UF; ++Part) { 3968 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3969 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3970 } 3971 } 3972 3973 // Reduce all of the unrolled parts into a single vector. 3974 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3975 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3976 3977 // The middle block terminator has already been assigned a DebugLoc here (the 3978 // OrigLoop's single latch terminator). We want the whole middle block to 3979 // appear to execute on this line because: (a) it is all compiler generated, 3980 // (b) these instructions are always executed after evaluating the latch 3981 // conditional branch, and (c) other passes may add new predecessors which 3982 // terminate on this line. This is the easiest way to ensure we don't 3983 // accidentally cause an extra step back into the loop while debugging. 3984 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 3985 if (PhiR->isOrdered()) 3986 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 3987 else { 3988 // Floating-point operations should have some FMF to enable the reduction. 3989 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 3990 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 3991 for (unsigned Part = 1; Part < UF; ++Part) { 3992 Value *RdxPart = State.get(LoopExitInstDef, Part); 3993 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 3994 ReducedPartRdx = Builder.CreateBinOp( 3995 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 3996 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 3997 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 3998 ReducedPartRdx, RdxPart); 3999 else 4000 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4001 } 4002 } 4003 4004 // Create the reduction after the loop. Note that inloop reductions create the 4005 // target reduction in the loop using a Reduction recipe. 
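// For example, an out-of-loop integer add reduction is typically emitted here
// as a single call to the llvm.vector.reduce.add intrinsic on ReducedPartRdx.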
4006 if (VF.isVector() && !PhiR->isInLoop()) { 4007 ReducedPartRdx = 4008 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 4009 // If the reduction can be performed in a smaller type, we need to extend 4010 // the reduction to the wider type before we branch to the original loop. 4011 if (PhiTy != RdxDesc.getRecurrenceType()) 4012 ReducedPartRdx = RdxDesc.isSigned() 4013 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4014 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4015 } 4016 4017 PHINode *ResumePhi = 4018 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue()); 4019 4020 // Create a phi node that merges control-flow from the backedge-taken check 4021 // block and the middle block. 4022 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4023 LoopScalarPreHeader->getTerminator()); 4024 4025 // If we are fixing reductions in the epilogue loop then we should already 4026 // have created a bc.merge.rdx Phi after the main vector body. Ensure that 4027 // we carry over the incoming values correctly. 4028 for (auto *Incoming : predecessors(LoopScalarPreHeader)) { 4029 if (Incoming == LoopMiddleBlock) 4030 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming); 4031 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming)) 4032 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming), 4033 Incoming); 4034 else 4035 BCBlockPhi->addIncoming(ReductionStartValue, Incoming); 4036 } 4037 4038 // Set the resume value for this reduction. 4039 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi}); 4040 4041 // Now, we need to fix the users of the reduction variable 4042 // inside and outside of the scalar remainder loop. 4043 4044 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4045 // in the exit blocks. See comment on analogous loop in 4046 // fixFirstOrderRecurrence for a more complete explanation of the logic. 4047 if (!Cost->requiresScalarEpilogue(VF)) 4048 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4049 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4050 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4051 4052 // Fix the scalar loop reduction variable with the incoming reduction sum 4053 // from the vector body and from the backedge value. 4054 int IncomingEdgeBlockIdx = 4055 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4056 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4057 // Pick the other block. 4058 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4059 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4060 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4061 } 4062 4063 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4064 VPTransformState &State) { 4065 RecurKind RK = RdxDesc.getRecurrenceKind(); 4066 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4067 return; 4068 4069 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4070 assert(LoopExitInstr && "null loop exit instruction"); 4071 SmallVector<Instruction *, 8> Worklist; 4072 SmallPtrSet<Instruction *, 8> Visited; 4073 Worklist.push_back(LoopExitInstr); 4074 Visited.insert(LoopExitInstr); 4075 4076 while (!Worklist.empty()) { 4077 Instruction *Cur = Worklist.pop_back_val(); 4078 if (isa<OverflowingBinaryOperator>(Cur)) 4079 for (unsigned Part = 0; Part < UF; ++Part) { 4080 // FIXME: Should not rely on getVPValue at this point.
4081 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4082 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4083 } 4084 4085 for (User *U : Cur->users()) { 4086 Instruction *UI = cast<Instruction>(U); 4087 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4088 Visited.insert(UI).second) 4089 Worklist.push_back(UI); 4090 } 4091 } 4092 } 4093 4094 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4095 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4096 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4097 // Some phis were already hand updated by the reduction and recurrence 4098 // code above, leave them alone. 4099 continue; 4100 4101 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4102 // Non-instruction incoming values will have only one value. 4103 4104 VPLane Lane = VPLane::getFirstLane(); 4105 if (isa<Instruction>(IncomingValue) && 4106 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4107 VF)) 4108 Lane = VPLane::getLastLaneForVF(VF); 4109 4110 // Can be a loop invariant incoming value or the last scalar value to be 4111 // extracted from the vectorized loop. 4112 // FIXME: Should not rely on getVPValue at this point. 4113 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4114 Value *lastIncomingValue = 4115 OrigLoop->isLoopInvariant(IncomingValue) 4116 ? IncomingValue 4117 : State.get(State.Plan->getVPValue(IncomingValue, true), 4118 VPIteration(UF - 1, Lane)); 4119 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4120 } 4121 } 4122 4123 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4124 // The basic block and loop containing the predicated instruction. 4125 auto *PredBB = PredInst->getParent(); 4126 auto *VectorLoop = LI->getLoopFor(PredBB); 4127 4128 // Initialize a worklist with the operands of the predicated instruction. 4129 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4130 4131 // Holds instructions that we need to analyze again. An instruction may be 4132 // reanalyzed if we don't yet know if we can sink it or not. 4133 SmallVector<Instruction *, 8> InstsToReanalyze; 4134 4135 // Returns true if a given use occurs in the predicated block. Phi nodes use 4136 // their operands in their corresponding predecessor blocks. 4137 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4138 auto *I = cast<Instruction>(U.getUser()); 4139 BasicBlock *BB = I->getParent(); 4140 if (auto *Phi = dyn_cast<PHINode>(I)) 4141 BB = Phi->getIncomingBlock( 4142 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4143 return BB == PredBB; 4144 }; 4145 4146 // Iteratively sink the scalarized operands of the predicated instruction 4147 // into the block we created for it. When an instruction is sunk, it's 4148 // operands are then added to the worklist. The algorithm ends after one pass 4149 // through the worklist doesn't sink a single instruction. 4150 bool Changed; 4151 do { 4152 // Add the instructions that need to be reanalyzed to the worklist, and 4153 // reset the changed indicator. 4154 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4155 InstsToReanalyze.clear(); 4156 Changed = false; 4157 4158 while (!Worklist.empty()) { 4159 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4160 4161 // We can't sink an instruction if it is a phi node, is not in the loop, 4162 // or may have side effects. 
4163 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4164 I->mayHaveSideEffects())
4165 continue;
4166
4167 // If the instruction is already in PredBB, check if we can sink its
4168 // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4169 // sinking the scalar instruction I, hence it appears in PredBB; but it
4170 // may have failed to sink I's operands (recursively), which we try
4171 // (again) here.
4172 if (I->getParent() == PredBB) {
4173 Worklist.insert(I->op_begin(), I->op_end());
4174 continue;
4175 }
4176
4177 // It's legal to sink the instruction if all its uses occur in the
4178 // predicated block. Otherwise, there's nothing to do yet, and we may
4179 // need to reanalyze the instruction.
4180 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4181 InstsToReanalyze.push_back(I);
4182 continue;
4183 }
4184
4185 // Move the instruction to the beginning of the predicated block, and add
4186 // its operands to the worklist.
4187 I->moveBefore(&*PredBB->getFirstInsertionPt());
4188 Worklist.insert(I->op_begin(), I->op_end());
4189
4190 // The sinking may have enabled other instructions to be sunk, so we will
4191 // need to iterate.
4192 Changed = true;
4193 }
4194 } while (Changed);
4195 }
4196
4197 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4198 for (PHINode *OrigPhi : OrigPHIsToFix) {
4199 VPWidenPHIRecipe *VPPhi =
4200 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4201 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4202 // Make sure the builder has a valid insert point.
4203 Builder.SetInsertPoint(NewPhi);
4204 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4205 VPValue *Inc = VPPhi->getIncomingValue(i);
4206 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4207 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4208 }
4209 }
4210 }
4211
4212 bool InnerLoopVectorizer::useOrderedReductions(
4213 const RecurrenceDescriptor &RdxDesc) {
4214 return Cost->useOrderedReductions(RdxDesc);
4215 }
4216
4217 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4218 VPWidenPHIRecipe *PhiR,
4219 VPTransformState &State) {
4220 assert(EnableVPlanNativePath &&
4221 "Non-native vplans are not expected to have VPWidenPHIRecipes.");
4222 // Currently we enter here in the VPlan-native path for non-induction
4223 // PHIs where all control flow is uniform. We simply widen these PHIs.
4224 // Create a vector phi with no operands - the vector phi operands will be
4225 // set at the end of vector code generation.
4226 Type *VecTy = (State.VF.isScalar())
4227 ? PN->getType()
4228 : VectorType::get(PN->getType(), State.VF);
4229 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4230 State.set(PhiR, VecPhi, 0);
4231 OrigPHIsToFix.push_back(cast<PHINode>(PN));
4232 }
4233
4234 /// A helper function for checking whether an integer division-related
4235 /// instruction may divide by zero (in which case it must be predicated if
4236 /// executed conditionally in the scalar code).
4237 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4238 /// Non-zero divisors that are not compile-time constants will not be
4239 /// converted into multiplication, so we will still end up scalarizing
4240 /// the division, but can do so w/o predication.
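/// For example, in scalar code like
///   if (b[i] != 0) q[i] = a[i] / b[i];
/// the divisor is not a constant, so the scalarized division must remain
/// predicated, whereas a non-zero constant divisor such as 'a[i] / 7' can be
/// executed unconditionally.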
4241 static bool mayDivideByZero(Instruction &I) { 4242 assert((I.getOpcode() == Instruction::UDiv || 4243 I.getOpcode() == Instruction::SDiv || 4244 I.getOpcode() == Instruction::URem || 4245 I.getOpcode() == Instruction::SRem) && 4246 "Unexpected instruction"); 4247 Value *Divisor = I.getOperand(1); 4248 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4249 return !CInt || CInt->isZero(); 4250 } 4251 4252 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4253 VPUser &ArgOperands, 4254 VPTransformState &State) { 4255 assert(!isa<DbgInfoIntrinsic>(I) && 4256 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4257 setDebugLocFromInst(&I); 4258 4259 Module *M = I.getParent()->getParent()->getParent(); 4260 auto *CI = cast<CallInst>(&I); 4261 4262 SmallVector<Type *, 4> Tys; 4263 for (Value *ArgOperand : CI->args()) 4264 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4265 4266 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4267 4268 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4269 // version of the instruction. 4270 // Is it beneficial to perform intrinsic call compared to lib call? 4271 bool NeedToScalarize = false; 4272 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4273 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4274 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4275 assert((UseVectorIntrinsic || !NeedToScalarize) && 4276 "Instruction should be scalarized elsewhere."); 4277 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4278 "Either the intrinsic cost or vector call cost must be valid"); 4279 4280 for (unsigned Part = 0; Part < UF; ++Part) { 4281 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4282 SmallVector<Value *, 4> Args; 4283 for (auto &I : enumerate(ArgOperands.operands())) { 4284 // Some intrinsics have a scalar argument - don't replace it with a 4285 // vector. 4286 Value *Arg; 4287 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4288 Arg = State.get(I.value(), Part); 4289 else { 4290 Arg = State.get(I.value(), VPIteration(0, 0)); 4291 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4292 TysForDecl.push_back(Arg->getType()); 4293 } 4294 Args.push_back(Arg); 4295 } 4296 4297 Function *VectorF; 4298 if (UseVectorIntrinsic) { 4299 // Use vector version of the intrinsic. 4300 if (VF.isVector()) 4301 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4302 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4303 assert(VectorF && "Can't retrieve vector intrinsic."); 4304 } else { 4305 // Use vector version of the function call. 4306 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4307 #ifndef NDEBUG 4308 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4309 "Can't create vector function."); 4310 #endif 4311 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4312 } 4313 SmallVector<OperandBundleDef, 1> OpBundles; 4314 CI->getOperandBundlesAsDefs(OpBundles); 4315 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4316 4317 if (isa<FPMathOperator>(V)) 4318 V->copyFastMathFlags(CI); 4319 4320 State.set(Def, V, Part); 4321 addMetadata(V, &I); 4322 } 4323 } 4324 4325 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4326 // We should not collect Scalars more than once per VF. 
Right now, this 4327 // function is called from collectUniformsAndScalars(), which already does 4328 // this check. Collecting Scalars for VF=1 does not make any sense. 4329 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4330 "This function should not be visited twice for the same VF"); 4331 4332 // This avoids any chances of creating a REPLICATE recipe during planning 4333 // since that would result in generation of scalarized code during execution, 4334 // which is not supported for scalable vectors. 4335 if (VF.isScalable()) { 4336 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4337 return; 4338 } 4339 4340 SmallSetVector<Instruction *, 8> Worklist; 4341 4342 // These sets are used to seed the analysis with pointers used by memory 4343 // accesses that will remain scalar. 4344 SmallSetVector<Instruction *, 8> ScalarPtrs; 4345 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4346 auto *Latch = TheLoop->getLoopLatch(); 4347 4348 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4349 // The pointer operands of loads and stores will be scalar as long as the 4350 // memory access is not a gather or scatter operation. The value operand of a 4351 // store will remain scalar if the store is scalarized. 4352 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4353 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4354 assert(WideningDecision != CM_Unknown && 4355 "Widening decision should be ready at this moment"); 4356 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4357 if (Ptr == Store->getValueOperand()) 4358 return WideningDecision == CM_Scalarize; 4359 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4360 "Ptr is neither a value or pointer operand"); 4361 return WideningDecision != CM_GatherScatter; 4362 }; 4363 4364 // A helper that returns true if the given value is a bitcast or 4365 // getelementptr instruction contained in the loop. 4366 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4367 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4368 isa<GetElementPtrInst>(V)) && 4369 !TheLoop->isLoopInvariant(V); 4370 }; 4371 4372 // A helper that evaluates a memory access's use of a pointer. If the use will 4373 // be a scalar use and the pointer is only used by memory accesses, we place 4374 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4375 // PossibleNonScalarPtrs. 4376 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4377 // We only care about bitcast and getelementptr instructions contained in 4378 // the loop. 4379 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4380 return; 4381 4382 // If the pointer has already been identified as scalar (e.g., if it was 4383 // also identified as uniform), there's nothing to do. 4384 auto *I = cast<Instruction>(Ptr); 4385 if (Worklist.count(I)) 4386 return; 4387 4388 // If the use of the pointer will be a scalar use, and all users of the 4389 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4390 // place the pointer in PossibleNonScalarPtrs. 
4391 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4392 return isa<LoadInst>(U) || isa<StoreInst>(U);
4393 }))
4394 ScalarPtrs.insert(I);
4395 else
4396 PossibleNonScalarPtrs.insert(I);
4397 };
4398
4399 // We seed the scalars analysis with two classes of instructions: (1)
4400 // instructions marked uniform-after-vectorization and (2) bitcast,
4401 // getelementptr and (pointer) phi instructions used by memory accesses
4402 // requiring a scalar use.
4403 //
4404 // (1) Add to the worklist all instructions that have been identified as
4405 // uniform-after-vectorization.
4406 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4407
4408 // (2) Add to the worklist all bitcast and getelementptr instructions used by
4409 // memory accesses requiring a scalar use. The pointer operands of loads and
4410 // stores will be scalar as long as the memory access is not a gather or
4411 // scatter operation. The value operand of a store will remain scalar if the
4412 // store is scalarized.
4413 for (auto *BB : TheLoop->blocks())
4414 for (auto &I : *BB) {
4415 if (auto *Load = dyn_cast<LoadInst>(&I)) {
4416 evaluatePtrUse(Load, Load->getPointerOperand());
4417 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4418 evaluatePtrUse(Store, Store->getPointerOperand());
4419 evaluatePtrUse(Store, Store->getValueOperand());
4420 }
4421 }
4422 for (auto *I : ScalarPtrs)
4423 if (!PossibleNonScalarPtrs.count(I)) {
4424 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4425 Worklist.insert(I);
4426 }
4427
4428 // Insert the forced scalars.
4429 // FIXME: Currently widenPHIInstruction() often creates a dead vector
4430 // induction variable when the PHI user is scalarized.
4431 auto ForcedScalar = ForcedScalars.find(VF);
4432 if (ForcedScalar != ForcedScalars.end())
4433 for (auto *I : ForcedScalar->second)
4434 Worklist.insert(I);
4435
4436 // Expand the worklist by looking through any bitcasts and getelementptr
4437 // instructions we've already identified as scalar. This is similar to the
4438 // expansion step in collectLoopUniforms(); however, here we're only
4439 // expanding to include additional bitcasts and getelementptr instructions.
4440 unsigned Idx = 0;
4441 while (Idx != Worklist.size()) {
4442 Instruction *Dst = Worklist[Idx++];
4443 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4444 continue;
4445 auto *Src = cast<Instruction>(Dst->getOperand(0));
4446 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4447 auto *J = cast<Instruction>(U);
4448 return !TheLoop->contains(J) || Worklist.count(J) ||
4449 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4450 isScalarUse(J, Src));
4451 })) {
4452 Worklist.insert(Src);
4453 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4454 }
4455 }
4456
4457 // An induction variable will remain scalar if all users of the induction
4458 // variable and induction variable update remain scalar.
4459 for (auto &Induction : Legal->getInductionVars()) {
4460 auto *Ind = Induction.first;
4461 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4462
4463 // If tail-folding is applied, the primary induction variable will be used
4464 // to feed a vector compare.
4465 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4466 continue;
4467
4468 // Returns true if \p Indvar is a pointer induction that is used directly by
4469 // load/store instruction \p I.
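// For example, in a loop that dereferences and then advances a pointer
// directly (roughly 'sum += *p; ++p;'), the pointer phi 'p' is a pointer
// induction whose memory use is as the load's address operand.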
4470 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4471 Instruction *I) { 4472 return Induction.second.getKind() == 4473 InductionDescriptor::IK_PtrInduction && 4474 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4475 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4476 }; 4477 4478 // Determine if all users of the induction variable are scalar after 4479 // vectorization. 4480 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4481 auto *I = cast<Instruction>(U); 4482 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4483 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4484 }); 4485 if (!ScalarInd) 4486 continue; 4487 4488 // Determine if all users of the induction variable update instruction are 4489 // scalar after vectorization. 4490 auto ScalarIndUpdate = 4491 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4492 auto *I = cast<Instruction>(U); 4493 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4494 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4495 }); 4496 if (!ScalarIndUpdate) 4497 continue; 4498 4499 // The induction variable and its update instruction will remain scalar. 4500 Worklist.insert(Ind); 4501 Worklist.insert(IndUpdate); 4502 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4503 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4504 << "\n"); 4505 } 4506 4507 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4508 } 4509 4510 bool LoopVectorizationCostModel::isScalarWithPredication( 4511 Instruction *I, ElementCount VF) const { 4512 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4513 return false; 4514 switch(I->getOpcode()) { 4515 default: 4516 break; 4517 case Instruction::Load: 4518 case Instruction::Store: { 4519 if (!Legal->isMaskRequired(I)) 4520 return false; 4521 auto *Ptr = getLoadStorePointerOperand(I); 4522 auto *Ty = getLoadStoreType(I); 4523 Type *VTy = Ty; 4524 if (VF.isVector()) 4525 VTy = VectorType::get(Ty, VF); 4526 const Align Alignment = getLoadStoreAlignment(I); 4527 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4528 TTI.isLegalMaskedGather(VTy, Alignment)) 4529 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4530 TTI.isLegalMaskedScatter(VTy, Alignment)); 4531 } 4532 case Instruction::UDiv: 4533 case Instruction::SDiv: 4534 case Instruction::SRem: 4535 case Instruction::URem: 4536 return mayDivideByZero(*I); 4537 } 4538 return false; 4539 } 4540 4541 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4542 Instruction *I, ElementCount VF) { 4543 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4544 assert(getWideningDecision(I, VF) == CM_Unknown && 4545 "Decision should not be set yet."); 4546 auto *Group = getInterleavedAccessGroup(I); 4547 assert(Group && "Must have a group."); 4548 4549 // If the instruction's allocated size doesn't equal it's type size, it 4550 // requires padding and will be scalarized. 4551 auto &DL = I->getModule()->getDataLayout(); 4552 auto *ScalarTy = getLoadStoreType(I); 4553 if (hasIrregularType(ScalarTy, DL)) 4554 return false; 4555 4556 // Check if masking is required. 4557 // A Group may need masking for one of two reasons: it resides in a block that 4558 // needs predication, or it was decided to use masking to deal with gaps 4559 // (either a gap at the end of a load-access that may result in a speculative 4560 // load, or any gaps in a store-access). 
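// For example, a factor-3 load group that reads A[3*i] and A[3*i+1] but not
// A[3*i+2] has a gap at its last member, so its final wide load may read past
// the end of A unless a scalar epilogue or a masked epilogue access is used;
// a store group with fewer members than its factor must be masked so the gap
// elements are not written.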
4561 bool PredicatedAccessRequiresMasking = 4562 blockNeedsPredicationForAnyReason(I->getParent()) && 4563 Legal->isMaskRequired(I); 4564 bool LoadAccessWithGapsRequiresEpilogMasking = 4565 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4566 !isScalarEpilogueAllowed(); 4567 bool StoreAccessWithGapsRequiresMasking = 4568 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4569 if (!PredicatedAccessRequiresMasking && 4570 !LoadAccessWithGapsRequiresEpilogMasking && 4571 !StoreAccessWithGapsRequiresMasking) 4572 return true; 4573 4574 // If masked interleaving is required, we expect that the user/target had 4575 // enabled it, because otherwise it either wouldn't have been created or 4576 // it should have been invalidated by the CostModel. 4577 assert(useMaskedInterleavedAccesses(TTI) && 4578 "Masked interleave-groups for predicated accesses are not enabled."); 4579 4580 if (Group->isReverse()) 4581 return false; 4582 4583 auto *Ty = getLoadStoreType(I); 4584 const Align Alignment = getLoadStoreAlignment(I); 4585 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4586 : TTI.isLegalMaskedStore(Ty, Alignment); 4587 } 4588 4589 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4590 Instruction *I, ElementCount VF) { 4591 // Get and ensure we have a valid memory instruction. 4592 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4593 4594 auto *Ptr = getLoadStorePointerOperand(I); 4595 auto *ScalarTy = getLoadStoreType(I); 4596 4597 // In order to be widened, the pointer should be consecutive, first of all. 4598 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4599 return false; 4600 4601 // If the instruction is a store located in a predicated block, it will be 4602 // scalarized. 4603 if (isScalarWithPredication(I, VF)) 4604 return false; 4605 4606 // If the instruction's allocated size doesn't equal it's type size, it 4607 // requires padding and will be scalarized. 4608 auto &DL = I->getModule()->getDataLayout(); 4609 if (hasIrregularType(ScalarTy, DL)) 4610 return false; 4611 4612 return true; 4613 } 4614 4615 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4616 // We should not collect Uniforms more than once per VF. Right now, 4617 // this function is called from collectUniformsAndScalars(), which 4618 // already does this check. Collecting Uniforms for VF=1 does not make any 4619 // sense. 4620 4621 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4622 "This function should not be visited twice for the same VF"); 4623 4624 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4625 // not analyze again. Uniforms.count(VF) will return 1. 4626 Uniforms[VF].clear(); 4627 4628 // We now know that the loop is vectorizable! 4629 // Collect instructions inside the loop that will remain uniform after 4630 // vectorization. 4631 4632 // Global values, params and instructions outside of current loop are out of 4633 // scope. 4634 auto isOutOfScope = [&](Value *V) -> bool { 4635 Instruction *I = dyn_cast<Instruction>(V); 4636 return (!I || !TheLoop->contains(I)); 4637 }; 4638 4639 // Worklist containing uniform instructions demanding lane 0. 4640 SetVector<Instruction *> Worklist; 4641 BasicBlock *Latch = TheLoop->getLoopLatch(); 4642 4643 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4644 // that are scalar with predication must not be considered uniform after 4645 // vectorization, because that would create an erroneous replicating region 4646 // where only a single instance out of VF should be formed. 4647 // TODO: optimize such seldom cases if found important, see PR40816. 4648 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4649 if (isOutOfScope(I)) { 4650 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4651 << *I << "\n"); 4652 return; 4653 } 4654 if (isScalarWithPredication(I, VF)) { 4655 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4656 << *I << "\n"); 4657 return; 4658 } 4659 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4660 Worklist.insert(I); 4661 }; 4662 4663 // Start with the conditional branch. If the branch condition is an 4664 // instruction contained in the loop that is only used by the branch, it is 4665 // uniform. 4666 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4667 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4668 addToWorklistIfAllowed(Cmp); 4669 4670 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4671 InstWidening WideningDecision = getWideningDecision(I, VF); 4672 assert(WideningDecision != CM_Unknown && 4673 "Widening decision should be ready at this moment"); 4674 4675 // A uniform memory op is itself uniform. We exclude uniform stores 4676 // here as they demand the last lane, not the first one. 4677 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4678 assert(WideningDecision == CM_Scalarize); 4679 return true; 4680 } 4681 4682 return (WideningDecision == CM_Widen || 4683 WideningDecision == CM_Widen_Reverse || 4684 WideningDecision == CM_Interleave); 4685 }; 4686 4687 4688 // Returns true if Ptr is the pointer operand of a memory access instruction 4689 // I, and I is known to not require scalarization. 4690 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4691 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4692 }; 4693 4694 // Holds a list of values which are known to have at least one uniform use. 4695 // Note that there may be other uses which aren't uniform. A "uniform use" 4696 // here is something which only demands lane 0 of the unrolled iterations; 4697 // it does not imply that all lanes produce the same value (e.g. this is not 4698 // the usual meaning of uniform) 4699 SetVector<Value *> HasUniformUse; 4700 4701 // Scan the loop for instructions which are either a) known to have only 4702 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4703 for (auto *BB : TheLoop->blocks()) 4704 for (auto &I : *BB) { 4705 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4706 switch (II->getIntrinsicID()) { 4707 case Intrinsic::sideeffect: 4708 case Intrinsic::experimental_noalias_scope_decl: 4709 case Intrinsic::assume: 4710 case Intrinsic::lifetime_start: 4711 case Intrinsic::lifetime_end: 4712 if (TheLoop->hasLoopInvariantOperands(&I)) 4713 addToWorklistIfAllowed(&I); 4714 break; 4715 default: 4716 break; 4717 } 4718 } 4719 4720 // ExtractValue instructions must be uniform, because the operands are 4721 // known to be loop-invariant. 
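// Extracting a member of a loop-invariant aggregate (e.g. one field of a
// struct value computed before the loop) yields the same value on every
// iteration, so only lane 0 is ever demanded.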
4722 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4723 assert(isOutOfScope(EVI->getAggregateOperand()) && 4724 "Expected aggregate value to be loop invariant"); 4725 addToWorklistIfAllowed(EVI); 4726 continue; 4727 } 4728 4729 // If there's no pointer operand, there's nothing to do. 4730 auto *Ptr = getLoadStorePointerOperand(&I); 4731 if (!Ptr) 4732 continue; 4733 4734 // A uniform memory op is itself uniform. We exclude uniform stores 4735 // here as they demand the last lane, not the first one. 4736 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4737 addToWorklistIfAllowed(&I); 4738 4739 if (isUniformDecision(&I, VF)) { 4740 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4741 HasUniformUse.insert(Ptr); 4742 } 4743 } 4744 4745 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4746 // demanding) users. Since loops are assumed to be in LCSSA form, this 4747 // disallows uses outside the loop as well. 4748 for (auto *V : HasUniformUse) { 4749 if (isOutOfScope(V)) 4750 continue; 4751 auto *I = cast<Instruction>(V); 4752 auto UsersAreMemAccesses = 4753 llvm::all_of(I->users(), [&](User *U) -> bool { 4754 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4755 }); 4756 if (UsersAreMemAccesses) 4757 addToWorklistIfAllowed(I); 4758 } 4759 4760 // Expand Worklist in topological order: whenever a new instruction 4761 // is added , its users should be already inside Worklist. It ensures 4762 // a uniform instruction will only be used by uniform instructions. 4763 unsigned idx = 0; 4764 while (idx != Worklist.size()) { 4765 Instruction *I = Worklist[idx++]; 4766 4767 for (auto OV : I->operand_values()) { 4768 // isOutOfScope operands cannot be uniform instructions. 4769 if (isOutOfScope(OV)) 4770 continue; 4771 // First order recurrence Phi's should typically be considered 4772 // non-uniform. 4773 auto *OP = dyn_cast<PHINode>(OV); 4774 if (OP && Legal->isFirstOrderRecurrence(OP)) 4775 continue; 4776 // If all the users of the operand are uniform, then add the 4777 // operand into the uniform worklist. 4778 auto *OI = cast<Instruction>(OV); 4779 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4780 auto *J = cast<Instruction>(U); 4781 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4782 })) 4783 addToWorklistIfAllowed(OI); 4784 } 4785 } 4786 4787 // For an instruction to be added into Worklist above, all its users inside 4788 // the loop should also be in Worklist. However, this condition cannot be 4789 // true for phi nodes that form a cyclic dependence. We must process phi 4790 // nodes separately. An induction variable will remain uniform if all users 4791 // of the induction variable and induction variable update remain uniform. 4792 // The code below handles both pointer and non-pointer induction variables. 4793 for (auto &Induction : Legal->getInductionVars()) { 4794 auto *Ind = Induction.first; 4795 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4796 4797 // Determine if all users of the induction variable are uniform after 4798 // vectorization. 4799 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4800 auto *I = cast<Instruction>(U); 4801 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4802 isVectorizedMemAccessUse(I, Ind); 4803 }); 4804 if (!UniformInd) 4805 continue; 4806 4807 // Determine if all users of the induction variable update instruction are 4808 // uniform after vectorization. 
4809 auto UniformIndUpdate = 4810 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4811 auto *I = cast<Instruction>(U); 4812 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4813 isVectorizedMemAccessUse(I, IndUpdate); 4814 }); 4815 if (!UniformIndUpdate) 4816 continue; 4817 4818 // The induction variable and its update instruction will remain uniform. 4819 addToWorklistIfAllowed(Ind); 4820 addToWorklistIfAllowed(IndUpdate); 4821 } 4822 4823 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4824 } 4825 4826 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4827 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4828 4829 if (Legal->getRuntimePointerChecking()->Need) { 4830 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4831 "runtime pointer checks needed. Enable vectorization of this " 4832 "loop with '#pragma clang loop vectorize(enable)' when " 4833 "compiling with -Os/-Oz", 4834 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4835 return true; 4836 } 4837 4838 if (!PSE.getPredicate().isAlwaysTrue()) { 4839 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4840 "runtime SCEV checks needed. Enable vectorization of this " 4841 "loop with '#pragma clang loop vectorize(enable)' when " 4842 "compiling with -Os/-Oz", 4843 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4844 return true; 4845 } 4846 4847 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4848 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4849 reportVectorizationFailure("Runtime stride check for small trip count", 4850 "runtime stride == 1 checks needed. Enable vectorization of " 4851 "this loop without such check by compiling with -Os/-Oz", 4852 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4853 return true; 4854 } 4855 4856 return false; 4857 } 4858 4859 ElementCount 4860 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4861 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4862 return ElementCount::getScalable(0); 4863 4864 if (Hints->isScalableVectorizationDisabled()) { 4865 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4866 "ScalableVectorizationDisabled", ORE, TheLoop); 4867 return ElementCount::getScalable(0); 4868 } 4869 4870 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4871 4872 auto MaxScalableVF = ElementCount::getScalable( 4873 std::numeric_limits<ElementCount::ScalarTy>::max()); 4874 4875 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4876 // FIXME: While for scalable vectors this is currently sufficient, this should 4877 // be replaced by a more detailed mechanism that filters out specific VFs, 4878 // instead of invalidating vectorization for a whole set of VFs based on the 4879 // MaxVF. 4880 4881 // Disable scalable vectorization if the loop contains unsupported reductions. 4882 if (!canVectorizeReductions(MaxScalableVF)) { 4883 reportVectorizationInfo( 4884 "Scalable vectorization not supported for the reduction " 4885 "operations found in this loop.", 4886 "ScalableVFUnfeasible", ORE, TheLoop); 4887 return ElementCount::getScalable(0); 4888 } 4889 4890 // Disable scalable vectorization if the loop contains any instructions 4891 // with element types not supported for scalable vectors. 
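// For example, if the target's scalable vector registers cannot hold an
// element type used by the loop (such as fp128 on some targets), no scalable
// VF is feasible and only fixed-width VFs remain candidates.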
4892 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4893 return !Ty->isVoidTy() && 4894 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4895 })) { 4896 reportVectorizationInfo("Scalable vectorization is not supported " 4897 "for all element types found in this loop.", 4898 "ScalableVFUnfeasible", ORE, TheLoop); 4899 return ElementCount::getScalable(0); 4900 } 4901 4902 if (Legal->isSafeForAnyVectorWidth()) 4903 return MaxScalableVF; 4904 4905 // Limit MaxScalableVF by the maximum safe dependence distance. 4906 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4907 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4908 MaxVScale = 4909 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4910 MaxScalableVF = ElementCount::getScalable( 4911 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 4912 if (!MaxScalableVF) 4913 reportVectorizationInfo( 4914 "Max legal vector width too small, scalable vectorization " 4915 "unfeasible.", 4916 "ScalableVFUnfeasible", ORE, TheLoop); 4917 4918 return MaxScalableVF; 4919 } 4920 4921 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4922 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4923 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4924 unsigned SmallestType, WidestType; 4925 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4926 4927 // Get the maximum safe dependence distance in bits computed by LAA. 4928 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4929 // the memory accesses that is most restrictive (involved in the smallest 4930 // dependence distance). 4931 unsigned MaxSafeElements = 4932 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4933 4934 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4935 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4936 4937 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4938 << ".\n"); 4939 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4940 << ".\n"); 4941 4942 // First analyze the UserVF, fall back if the UserVF should be ignored. 4943 if (UserVF) { 4944 auto MaxSafeUserVF = 4945 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4946 4947 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4948 // If `VF=vscale x N` is safe, then so is `VF=N` 4949 if (UserVF.isScalable()) 4950 return FixedScalableVFPair( 4951 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4952 else 4953 return UserVF; 4954 } 4955 4956 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4957 4958 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4959 // is better to ignore the hint and let the compiler choose a suitable VF. 
4960 if (!UserVF.isScalable()) {
4961 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4962 << " is unsafe, clamping to max safe VF="
4963 << MaxSafeFixedVF << ".\n");
4964 ORE->emit([&]() {
4965 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4966 TheLoop->getStartLoc(),
4967 TheLoop->getHeader())
4968 << "User-specified vectorization factor "
4969 << ore::NV("UserVectorizationFactor", UserVF)
4970 << " is unsafe, clamping to maximum safe vectorization factor "
4971 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
4972 });
4973 return MaxSafeFixedVF;
4974 }
4975
4976 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
4977 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4978 << " is ignored because scalable vectors are not "
4979 "available.\n");
4980 ORE->emit([&]() {
4981 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4982 TheLoop->getStartLoc(),
4983 TheLoop->getHeader())
4984 << "User-specified vectorization factor "
4985 << ore::NV("UserVectorizationFactor", UserVF)
4986 << " is ignored because the target does not support scalable "
4987 "vectors. The compiler will pick a more suitable value.";
4988 });
4989 } else {
4990 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4991 << " is unsafe. Ignoring scalable UserVF.\n");
4992 ORE->emit([&]() {
4993 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4994 TheLoop->getStartLoc(),
4995 TheLoop->getHeader())
4996 << "User-specified vectorization factor "
4997 << ore::NV("UserVectorizationFactor", UserVF)
4998 << " is unsafe. Ignoring the hint to let the compiler pick a "
4999 "more suitable value.";
5000 });
5001 }
5002 }
5003
5004 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5005 << " / " << WidestType << " bits.\n");
5006
5007 FixedScalableVFPair Result(ElementCount::getFixed(1),
5008 ElementCount::getScalable(0));
5009 if (auto MaxVF =
5010 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5011 MaxSafeFixedVF, FoldTailByMasking))
5012 Result.FixedVF = MaxVF;
5013
5014 if (auto MaxVF =
5015 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5016 MaxSafeScalableVF, FoldTailByMasking))
5017 if (MaxVF.isScalable()) {
5018 Result.ScalableVF = MaxVF;
5019 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5020 << "\n");
5021 }
5022
5023 return Result;
5024 }
5025
5026 FixedScalableVFPair
5027 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5028 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5029 // TODO: It may be useful to insert the runtime check anyway, since it is
5030 // still likely to be dynamically uniform if the target can skip it.
5031 reportVectorizationFailure(
5032 "Not inserting runtime ptr check for divergent target",
5033 "runtime pointer checks needed.
Not enabled for divergent target",
5034 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5035 return FixedScalableVFPair::getNone();
5036 }
5037
5038 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5039 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5040 if (TC == 1) {
5041 reportVectorizationFailure("Single iteration (non) loop",
5042 "loop trip count is one, irrelevant for vectorization",
5043 "SingleIterationLoop", ORE, TheLoop);
5044 return FixedScalableVFPair::getNone();
5045 }
5046
5047 switch (ScalarEpilogueStatus) {
5048 case CM_ScalarEpilogueAllowed:
5049 return computeFeasibleMaxVF(TC, UserVF, false);
5050 case CM_ScalarEpilogueNotAllowedUsePredicate:
5051 LLVM_FALLTHROUGH;
5052 case CM_ScalarEpilogueNotNeededUsePredicate:
5053 LLVM_DEBUG(
5054 dbgs() << "LV: vector predicate hint/switch found.\n"
5055 << "LV: Not allowing scalar epilogue, creating predicated "
5056 << "vector loop.\n");
5057 break;
5058 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5059 // fallthrough as a special case of OptForSize
5060 case CM_ScalarEpilogueNotAllowedOptSize:
5061 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5062 LLVM_DEBUG(
5063 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5064 else
5065 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5066 << "count.\n");
5067
5068 // Bail if runtime checks are required, which are not good when optimising
5069 // for size.
5070 if (runtimeChecksRequired())
5071 return FixedScalableVFPair::getNone();
5072
5073 break;
5074 }
5075
5076 // The only loops we can vectorize without a scalar epilogue are loops with
5077 // a bottom-test and a single exiting block. We'd have to handle the fact
5078 // that not every instruction executes on the last iteration. This will
5079 // require a lane mask which varies through the vector loop body. (TODO)
5080 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5081 // If there was a tail-folding hint/switch, but we can't fold the tail by
5082 // masking, fallback to a vectorization with a scalar epilogue.
5083 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5084 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5085 "scalar epilogue instead.\n");
5086 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5087 return computeFeasibleMaxVF(TC, UserVF, false);
5088 }
5089 return FixedScalableVFPair::getNone();
5090 }
5091
5092 // Now try the tail folding
5093
5094 // Invalidate interleave groups that require an epilogue if we can't mask
5095 // the interleave-group.
5096 if (!useMaskedInterleavedAccesses(TTI)) {
5097 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5098 "No decisions should have been taken at this point");
5099 // Note: There is no need to invalidate any cost modeling decisions here, as
5100 // none were taken so far.
5101 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5102 }
5103
5104 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5105 // Avoid tail folding if the trip count is known to be a multiple of any VF
5106 // we chose.
5107 // FIXME: The condition below pessimises the case for fixed-width vectors,
5108 // when scalable VFs are also candidates for vectorization.
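// For example, with a compile-time trip count of 64, no user-specified
// interleave count (UserIC == 0) and MaxFixedVF == 8, 64 urem 8 == 0, so the
// fixed VFs under consideration all divide the trip count evenly and tail
// folding can be skipped.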
5109 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5110 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5111 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5112 "MaxFixedVF must be a power of 2"); 5113 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5114 : MaxFixedVF.getFixedValue(); 5115 ScalarEvolution *SE = PSE.getSE(); 5116 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5117 const SCEV *ExitCount = SE->getAddExpr( 5118 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5119 const SCEV *Rem = SE->getURemExpr( 5120 SE->applyLoopGuards(ExitCount, TheLoop), 5121 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5122 if (Rem->isZero()) { 5123 // Accept MaxFixedVF if we do not have a tail. 5124 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5125 return MaxFactors; 5126 } 5127 } 5128 5129 // For scalable vectors don't use tail folding for low trip counts or 5130 // optimizing for code size. We only permit this if the user has explicitly 5131 // requested it. 5132 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5133 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5134 MaxFactors.ScalableVF.isVector()) 5135 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5136 5137 // If we don't know the precise trip count, or if the trip count that we 5138 // found modulo the vectorization factor is not zero, try to fold the tail 5139 // by masking. 5140 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5141 if (Legal->prepareToFoldTailByMasking()) { 5142 FoldTailByMasking = true; 5143 return MaxFactors; 5144 } 5145 5146 // If there was a tail-folding hint/switch, but we can't fold the tail by 5147 // masking, fallback to a vectorization with a scalar epilogue. 5148 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5149 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5150 "scalar epilogue instead.\n"); 5151 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5152 return MaxFactors; 5153 } 5154 5155 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5156 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5157 return FixedScalableVFPair::getNone(); 5158 } 5159 5160 if (TC == 0) { 5161 reportVectorizationFailure( 5162 "Unable to calculate the loop count due to complex control flow", 5163 "unable to calculate the loop count due to complex control flow", 5164 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5165 return FixedScalableVFPair::getNone(); 5166 } 5167 5168 reportVectorizationFailure( 5169 "Cannot optimize for size and vectorize at the same time.", 5170 "cannot optimize for size and vectorize at the same time. " 5171 "Enable vectorization of this loop with '#pragma clang loop " 5172 "vectorize(enable)' when compiling with -Os/-Oz", 5173 "NoTailLoopWithOptForSize", ORE, TheLoop); 5174 return FixedScalableVFPair::getNone(); 5175 } 5176 5177 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5178 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5179 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5180 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5181 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5182 ComputeScalableMaxVF ? 
TargetTransformInfo::RGK_ScalableVector
5183 : TargetTransformInfo::RGK_FixedWidthVector);
5184
5185 // Convenience function to return the minimum of two ElementCounts.
5186 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5187 assert((LHS.isScalable() == RHS.isScalable()) &&
5188 "Scalable flags must match");
5189 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5190 };
5191
5192 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5193 // Note that both WidestRegister and WidestType may not be powers of 2.
5194 auto MaxVectorElementCount = ElementCount::get(
5195 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5196 ComputeScalableMaxVF);
5197 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5198 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5199 << (MaxVectorElementCount * WidestType) << " bits.\n");
5200
5201 if (!MaxVectorElementCount) {
5202 LLVM_DEBUG(dbgs() << "LV: The target has no "
5203 << (ComputeScalableMaxVF ? "scalable" : "fixed")
5204 << " vector registers.\n");
5205 return ElementCount::getFixed(1);
5206 }
5207
5208 const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5209 if (ConstTripCount &&
5210 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5211 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5212 // If loop trip count (TC) is known at compile time there is no point in
5213 // choosing VF greater than TC (as done in the loop below). Select maximum
5214 // power of two which doesn't exceed TC.
5215 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5216 // when the TC is less than or equal to the known number of lanes.
5217 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5218 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5219 "exceeding the constant trip count: "
5220 << ClampedConstTripCount << "\n");
5221 return ElementCount::getFixed(ClampedConstTripCount);
5222 }
5223
5224 ElementCount MaxVF = MaxVectorElementCount;
5225 if (TTI.shouldMaximizeVectorBandwidth() ||
5226 (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5227 auto MaxVectorElementCountMaxBW = ElementCount::get(
5228 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5229 ComputeScalableMaxVF);
5230 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5231
5232 // Collect all viable vectorization factors larger than the default MaxVF
5233 // (i.e. MaxVectorElementCount).
5234 SmallVector<ElementCount, 8> VFs;
5235 for (ElementCount VS = MaxVectorElementCount * 2;
5236 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5237 VFs.push_back(VS);
5238
5239 // For each VF calculate its register usage.
5240 auto RUs = calculateRegisterUsage(VFs);
5241
5242 // Select the largest VF which doesn't require more registers than existing
5243 // ones.
5244 for (int i = RUs.size() - 1; i >= 0; --i) { 5245 bool Selected = true; 5246 for (auto &pair : RUs[i].MaxLocalUsers) { 5247 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5248 if (pair.second > TargetNumRegisters) 5249 Selected = false; 5250 } 5251 if (Selected) { 5252 MaxVF = VFs[i]; 5253 break; 5254 } 5255 } 5256 if (ElementCount MinVF = 5257 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5258 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5259 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5260 << ") with target's minimum: " << MinVF << '\n'); 5261 MaxVF = MinVF; 5262 } 5263 } 5264 } 5265 return MaxVF; 5266 } 5267 5268 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5269 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5270 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5271 auto Min = Attr.getVScaleRangeMin(); 5272 auto Max = Attr.getVScaleRangeMax(); 5273 if (Max && Min == Max) 5274 return Max; 5275 } 5276 5277 return TTI.getVScaleForTuning(); 5278 } 5279 5280 bool LoopVectorizationCostModel::isMoreProfitable( 5281 const VectorizationFactor &A, const VectorizationFactor &B) const { 5282 InstructionCost CostA = A.Cost; 5283 InstructionCost CostB = B.Cost; 5284 5285 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5286 5287 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5288 MaxTripCount) { 5289 // If we are folding the tail and the trip count is a known (possibly small) 5290 // constant, the trip count will be rounded up to an integer number of 5291 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5292 // which we compare directly. When not folding the tail, the total cost will 5293 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5294 // approximated with the per-lane cost below instead of using the tripcount 5295 // as here. 5296 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5297 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5298 return RTCostA < RTCostB; 5299 } 5300 5301 // Improve estimate for the vector width if it is scalable. 5302 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5303 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5304 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5305 if (A.Width.isScalable()) 5306 EstimatedWidthA *= VScale.getValue(); 5307 if (B.Width.isScalable()) 5308 EstimatedWidthB *= VScale.getValue(); 5309 } 5310 5311 // Assume vscale may be larger than 1 (or the value being tuned for), 5312 // so that scalable vectorization is slightly favorable over fixed-width 5313 // vectorization. 
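// In other words, when A is scalable and B is fixed, prefer A whenever
// CostA / EstimatedWidthA <= CostB / B.Width; the division is avoided by
// cross-multiplying, which is what the comparison below computes.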
5314 if (A.Width.isScalable() && !B.Width.isScalable()) 5315 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5316 5317 // To avoid the need for FP division: 5318 // (CostA / A.Width) < (CostB / B.Width) 5319 // <=> (CostA * B.Width) < (CostB * A.Width) 5320 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5321 } 5322 5323 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5324 const ElementCountSet &VFCandidates) { 5325 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5326 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5327 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5328 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5329 "Expected Scalar VF to be a candidate"); 5330 5331 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5332 VectorizationFactor ChosenFactor = ScalarCost; 5333 5334 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5335 if (ForceVectorization && VFCandidates.size() > 1) { 5336 // Ignore scalar width, because the user explicitly wants vectorization. 5337 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5338 // evaluation. 5339 ChosenFactor.Cost = InstructionCost::getMax(); 5340 } 5341 5342 SmallVector<InstructionVFPair> InvalidCosts; 5343 for (const auto &i : VFCandidates) { 5344 // The cost for scalar VF=1 is already calculated, so ignore it. 5345 if (i.isScalar()) 5346 continue; 5347 5348 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5349 VectorizationFactor Candidate(i, C.first); 5350 5351 #ifndef NDEBUG 5352 unsigned AssumedMinimumVscale = 1; 5353 if (Optional<unsigned> VScale = getVScaleForTuning()) 5354 AssumedMinimumVscale = VScale.getValue(); 5355 unsigned Width = 5356 Candidate.Width.isScalable() 5357 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5358 : Candidate.Width.getFixedValue(); 5359 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5360 << " costs: " << (Candidate.Cost / Width)); 5361 if (i.isScalable()) 5362 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5363 << AssumedMinimumVscale << ")"); 5364 LLVM_DEBUG(dbgs() << ".\n"); 5365 #endif 5366 5367 if (!C.second && !ForceVectorization) { 5368 LLVM_DEBUG( 5369 dbgs() << "LV: Not considering vector loop of width " << i 5370 << " because it will not generate any vector instructions.\n"); 5371 continue; 5372 } 5373 5374 // If profitable add it to ProfitableVF list. 5375 if (isMoreProfitable(Candidate, ScalarCost)) 5376 ProfitableVFs.push_back(Candidate); 5377 5378 if (isMoreProfitable(Candidate, ChosenFactor)) 5379 ChosenFactor = Candidate; 5380 } 5381 5382 // Emit a report of VFs with invalid costs in the loop. 5383 if (!InvalidCosts.empty()) { 5384 // Group the remarks per instruction, keeping the instruction order from 5385 // InvalidCosts. 5386 std::map<Instruction *, unsigned> Numbering; 5387 unsigned I = 0; 5388 for (auto &Pair : InvalidCosts) 5389 if (!Numbering.count(Pair.first)) 5390 Numbering[Pair.first] = I++; 5391 5392 // Sort the list, first on instruction(number) then on VF. 
5393 llvm::sort(InvalidCosts, 5394 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5395 if (Numbering[A.first] != Numbering[B.first]) 5396 return Numbering[A.first] < Numbering[B.first]; 5397 ElementCountComparator ECC; 5398 return ECC(A.second, B.second); 5399 }); 5400 5401 // For a list of ordered instruction-vf pairs: 5402 // [(load, vf1), (load, vf2), (store, vf1)] 5403 // Group the instructions together to emit separate remarks for: 5404 // load (vf1, vf2) 5405 // store (vf1) 5406 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5407 auto Subset = ArrayRef<InstructionVFPair>(); 5408 do { 5409 if (Subset.empty()) 5410 Subset = Tail.take_front(1); 5411 5412 Instruction *I = Subset.front().first; 5413 5414 // If the next instruction is different, or if there are no other pairs, 5415 // emit a remark for the collated subset. e.g. 5416 // [(load, vf1), (load, vf2))] 5417 // to emit: 5418 // remark: invalid costs for 'load' at VF=(vf, vf2) 5419 if (Subset == Tail || Tail[Subset.size()].first != I) { 5420 std::string OutString; 5421 raw_string_ostream OS(OutString); 5422 assert(!Subset.empty() && "Unexpected empty range"); 5423 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5424 for (auto &Pair : Subset) 5425 OS << (Pair.second == Subset.front().second ? "" : ", ") 5426 << Pair.second; 5427 OS << "):"; 5428 if (auto *CI = dyn_cast<CallInst>(I)) 5429 OS << " call to " << CI->getCalledFunction()->getName(); 5430 else 5431 OS << " " << I->getOpcodeName(); 5432 OS.flush(); 5433 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5434 Tail = Tail.drop_front(Subset.size()); 5435 Subset = {}; 5436 } else 5437 // Grow the subset by one element 5438 Subset = Tail.take_front(Subset.size() + 1); 5439 } while (!Tail.empty()); 5440 } 5441 5442 if (!EnableCondStoresVectorization && NumPredStores) { 5443 reportVectorizationFailure("There are conditional stores.", 5444 "store that is conditionally executed prevents vectorization", 5445 "ConditionalStore", ORE, TheLoop); 5446 ChosenFactor = ScalarCost; 5447 } 5448 5449 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5450 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5451 << "LV: Vectorization seems to be not beneficial, " 5452 << "but was forced by a user.\n"); 5453 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5454 return ChosenFactor; 5455 } 5456 5457 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5458 const Loop &L, ElementCount VF) const { 5459 // Cross iteration phis such as reductions need special handling and are 5460 // currently unsupported. 5461 if (any_of(L.getHeader()->phis(), 5462 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5463 return false; 5464 5465 // Phis with uses outside of the loop require special handling and are 5466 // currently unsupported. 5467 for (auto &Entry : Legal->getInductionVars()) { 5468 // Look for uses of the value of the induction at the last iteration. 5469 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5470 for (User *U : PostInc->users()) 5471 if (!L.contains(cast<Instruction>(U))) 5472 return false; 5473 // Look for uses of penultimate value of the induction. 5474 for (User *U : Entry.first->users()) 5475 if (!L.contains(cast<Instruction>(U))) 5476 return false; 5477 } 5478 5479 // Induction variables that are widened require special handling that is 5480 // currently not supported. 
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
  if (L.getExitingBlock() != L.getLoopLatch())
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  // FIXME: We should consider changing the threshold for scalable
  // vectors to take VScaleForTuning into account.
  if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
    if (LVP.hasPlanWithVF(ForcedEC))
      return {ForcedEC, 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
                         "this loop\n");
    return Result;
  }

  // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
  // the main loop handles 8 lanes per iteration. We could still benefit from
  // vectorizing the epilogue loop with VF=4.
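  // A sketch of the selection below: estimate the number of lanes the main
  // loop really handles (scaling a scalable VF by the target's
  // vscale-for-tuning value when known), then pick the most profitable
  // already-costed VF that is known to be narrower than that estimate and for
  // which a VPlan exists.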
5568 ElementCount EstimatedRuntimeVF = MainLoopVF; 5569 if (MainLoopVF.isScalable()) { 5570 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5571 if (Optional<unsigned> VScale = getVScaleForTuning()) 5572 EstimatedRuntimeVF *= VScale.getValue(); 5573 } 5574 5575 for (auto &NextVF : ProfitableVFs) 5576 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5577 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5578 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5579 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5580 LVP.hasPlanWithVF(NextVF.Width)) 5581 Result = NextVF; 5582 5583 if (Result != VectorizationFactor::Disabled()) 5584 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5585 << Result.Width << "\n";); 5586 return Result; 5587 } 5588 5589 std::pair<unsigned, unsigned> 5590 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5591 unsigned MinWidth = -1U; 5592 unsigned MaxWidth = 8; 5593 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5594 // For in-loop reductions, no element types are added to ElementTypesInLoop 5595 // if there are no loads/stores in the loop. In this case, check through the 5596 // reduction variables to determine the maximum width. 5597 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5598 // Reset MaxWidth so that we can find the smallest type used by recurrences 5599 // in the loop. 5600 MaxWidth = -1U; 5601 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5602 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5603 // When finding the min width used by the recurrence we need to account 5604 // for casts on the input operands of the recurrence. 5605 MaxWidth = std::min<unsigned>( 5606 MaxWidth, std::min<unsigned>( 5607 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5608 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5609 } 5610 } else { 5611 for (Type *T : ElementTypesInLoop) { 5612 MinWidth = std::min<unsigned>( 5613 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5614 MaxWidth = std::max<unsigned>( 5615 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5616 } 5617 } 5618 return {MinWidth, MaxWidth}; 5619 } 5620 5621 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5622 ElementTypesInLoop.clear(); 5623 // For each block. 5624 for (BasicBlock *BB : TheLoop->blocks()) { 5625 // For each instruction in the loop. 5626 for (Instruction &I : BB->instructionsWithoutDebug()) { 5627 Type *T = I.getType(); 5628 5629 // Skip ignored values. 5630 if (ValuesToIgnore.count(&I)) 5631 continue; 5632 5633 // Only examine Loads, Stores and PHINodes. 5634 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5635 continue; 5636 5637 // Examine PHI nodes that are reduction variables. Update the type to 5638 // account for the recurrence type. 5639 if (auto *PN = dyn_cast<PHINode>(&I)) { 5640 if (!Legal->isReductionVariable(PN)) 5641 continue; 5642 const RecurrenceDescriptor &RdxDesc = 5643 Legal->getReductionVars().find(PN)->second; 5644 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5645 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5646 RdxDesc.getRecurrenceType(), 5647 TargetTransformInfo::ReductionFlags())) 5648 continue; 5649 T = RdxDesc.getRecurrenceType(); 5650 } 5651 5652 // Examine the stored values. 
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      assert(T->isSized() &&
             "Expected the load/store/recurrence type to be sized");

      ElementTypesInLoop.insert(T);
    }
  }
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // The maximum safe dependence distance was already used to limit the VF, so
  // do not widen the loop any further by interleaving.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross iteration dependences for reductions.
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that are
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when OptForSize, in which case IC is set to 1 above.
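  // For illustration (hypothetical numbers): with 32 registers in a class,
  // 2 of them held by loop-invariant values and at most 6 values live inside
  // the loop at once, (32 - 2) / 6 = 5, which PowerOf2Floor rounds down to an
  // interleave count of 4 for that class.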
5716 unsigned IC = UINT_MAX; 5717 5718 for (auto& pair : R.MaxLocalUsers) { 5719 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5720 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5721 << " registers of " 5722 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5723 if (VF.isScalar()) { 5724 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5725 TargetNumRegisters = ForceTargetNumScalarRegs; 5726 } else { 5727 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5728 TargetNumRegisters = ForceTargetNumVectorRegs; 5729 } 5730 unsigned MaxLocalUsers = pair.second; 5731 unsigned LoopInvariantRegs = 0; 5732 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5733 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5734 5735 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5736 // Don't count the induction variable as interleaved. 5737 if (EnableIndVarRegisterHeur) { 5738 TmpIC = 5739 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5740 std::max(1U, (MaxLocalUsers - 1))); 5741 } 5742 5743 IC = std::min(IC, TmpIC); 5744 } 5745 5746 // Clamp the interleave ranges to reasonable counts. 5747 unsigned MaxInterleaveCount = 5748 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5749 5750 // Check if the user has overridden the max. 5751 if (VF.isScalar()) { 5752 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5753 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5754 } else { 5755 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5756 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5757 } 5758 5759 // If trip count is known or estimated compile time constant, limit the 5760 // interleave count to be less than the trip count divided by VF, provided it 5761 // is at least 1. 5762 // 5763 // For scalable vectors we can't know if interleaving is beneficial. It may 5764 // not be beneficial for small loops if none of the lanes in the second vector 5765 // iterations is enabled. However, for larger loops, there is likely to be a 5766 // similar benefit as for fixed-width vectors. For now, we choose to leave 5767 // the InterleaveCount as if vscale is '1', although if some information about 5768 // the vector is known (e.g. min vector size), we can make a better decision. 5769 if (BestKnownTC) { 5770 MaxInterleaveCount = 5771 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5772 // Make sure MaxInterleaveCount is greater than 0. 5773 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5774 } 5775 5776 assert(MaxInterleaveCount > 0 && 5777 "Maximum interleave count must be greater than 0"); 5778 5779 // Clamp the calculated IC to be between the 1 and the max interleave count 5780 // that the target and trip count allows. 5781 if (IC > MaxInterleaveCount) 5782 IC = MaxInterleaveCount; 5783 else 5784 // Make sure IC is greater than 0. 5785 IC = std::max(1u, IC); 5786 5787 assert(IC > 0 && "Interleave count must be greater than 0."); 5788 5789 // If we did not calculate the cost for VF (because the user selected the VF) 5790 // then we calculate the cost of VF here. 
5791 if (LoopCost == 0) { 5792 InstructionCost C = expectedCost(VF).first; 5793 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5794 LoopCost = *C.getValue(); 5795 } 5796 5797 assert(LoopCost && "Non-zero loop cost expected"); 5798 5799 // Interleave if we vectorized this loop and there is a reduction that could 5800 // benefit from interleaving. 5801 if (VF.isVector() && HasReductions) { 5802 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5803 return IC; 5804 } 5805 5806 // For any scalar loop that either requires runtime checks or predication we 5807 // are better off leaving this to the unroller. Note that if we've already 5808 // vectorized the loop we will have done the runtime check and so interleaving 5809 // won't require further checks. 5810 bool ScalarInterleavingRequiresPredication = 5811 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5812 return Legal->blockNeedsPredication(BB); 5813 })); 5814 bool ScalarInterleavingRequiresRuntimePointerCheck = 5815 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5816 5817 // We want to interleave small loops in order to reduce the loop overhead and 5818 // potentially expose ILP opportunities. 5819 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5820 << "LV: IC is " << IC << '\n' 5821 << "LV: VF is " << VF << '\n'); 5822 const bool AggressivelyInterleaveReductions = 5823 TTI.enableAggressiveInterleaving(HasReductions); 5824 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5825 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5826 // We assume that the cost overhead is 1 and we use the cost model 5827 // to estimate the cost of the loop and interleave until the cost of the 5828 // loop overhead is about 5% of the cost of the loop. 5829 unsigned SmallIC = 5830 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5831 5832 // Interleave until store/load ports (estimated by max interleave count) are 5833 // saturated. 5834 unsigned NumStores = Legal->getNumStores(); 5835 unsigned NumLoads = Legal->getNumLoads(); 5836 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5837 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5838 5839 // There is little point in interleaving for reductions containing selects 5840 // and compares when VF=1 since it may just create more overhead than it's 5841 // worth for loops with small trip counts. This is because we still have to 5842 // do the final reduction after the loop. 5843 bool HasSelectCmpReductions = 5844 HasReductions && 5845 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5846 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5847 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5848 RdxDesc.getRecurrenceKind()); 5849 }); 5850 if (HasSelectCmpReductions) { 5851 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5852 return 1; 5853 } 5854 5855 // If we have a scalar reduction (vector reductions are already dealt with 5856 // by this point), we can increase the critical path length if the loop 5857 // we're interleaving is inside another loop. For tree-wise reductions 5858 // set the limit to 2, and for ordered reductions it's best to disable 5859 // interleaving entirely. 
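    // (The tree-wise cap applied below comes from MaxNestedScalarReductionIC;
    // ordered reductions simply return an interleave count of 1.)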
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      bool HasOrderedReductions =
          any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
            const RecurrenceDescriptor &RdxDesc = Reduction.second;
            return RdxDesc.isOrdered();
          });
      if (HasOrderedReductions) {
        LLVM_DEBUG(
            dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
        return 1;
      }

      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC, but not as aggressively as the
      // normal IC, to cover the rare situation where resources are too
      // limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not require more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
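  // For illustration: if the value defined by the instruction numbered 3 is
  // last used by the instruction numbered 10, its interval is roughly [3, 10]
  // and it stays in the open set, counting against register pressure, for
  // that whole range.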
5937 using IntervalMap = DenseMap<Instruction *, unsigned>; 5938 5939 // Maps instruction to its index. 5940 SmallVector<Instruction *, 64> IdxToInstr; 5941 // Marks the end of each interval. 5942 IntervalMap EndPoint; 5943 // Saves the list of instruction indices that are used in the loop. 5944 SmallPtrSet<Instruction *, 8> Ends; 5945 // Saves the list of values that are used in the loop but are 5946 // defined outside the loop, such as arguments and constants. 5947 SmallPtrSet<Value *, 8> LoopInvariants; 5948 5949 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5950 for (Instruction &I : BB->instructionsWithoutDebug()) { 5951 IdxToInstr.push_back(&I); 5952 5953 // Save the end location of each USE. 5954 for (Value *U : I.operands()) { 5955 auto *Instr = dyn_cast<Instruction>(U); 5956 5957 // Ignore non-instruction values such as arguments, constants, etc. 5958 if (!Instr) 5959 continue; 5960 5961 // If this instruction is outside the loop then record it and continue. 5962 if (!TheLoop->contains(Instr)) { 5963 LoopInvariants.insert(Instr); 5964 continue; 5965 } 5966 5967 // Overwrite previous end points. 5968 EndPoint[Instr] = IdxToInstr.size(); 5969 Ends.insert(Instr); 5970 } 5971 } 5972 } 5973 5974 // Saves the list of intervals that end with the index in 'key'. 5975 using InstrList = SmallVector<Instruction *, 2>; 5976 DenseMap<unsigned, InstrList> TransposeEnds; 5977 5978 // Transpose the EndPoints to a list of values that end at each index. 5979 for (auto &Interval : EndPoint) 5980 TransposeEnds[Interval.second].push_back(Interval.first); 5981 5982 SmallPtrSet<Instruction *, 8> OpenIntervals; 5983 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5984 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5985 5986 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5987 5988 // A lambda that gets the register usage for the given type and VF. 5989 const auto &TTICapture = TTI; 5990 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 5991 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5992 return 0; 5993 InstructionCost::CostType RegUsage = 5994 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 5995 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 5996 "Nonsensical values for register usage."); 5997 return RegUsage; 5998 }; 5999 6000 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6001 Instruction *I = IdxToInstr[i]; 6002 6003 // Remove all of the instructions that end at this location. 6004 InstrList &List = TransposeEnds[i]; 6005 for (Instruction *ToRemove : List) 6006 OpenIntervals.erase(ToRemove); 6007 6008 // Ignore instructions that are never used within the loop. 6009 if (!Ends.count(I)) 6010 continue; 6011 6012 // Skip ignored values. 6013 if (ValuesToIgnore.count(I)) 6014 continue; 6015 6016 // For each VF find the maximum usage of registers. 6017 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6018 // Count the number of live intervals. 6019 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6020 6021 if (VFs[j].isScalar()) { 6022 for (auto Inst : OpenIntervals) { 6023 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6024 if (RegUsage.find(ClassID) == RegUsage.end()) 6025 RegUsage[ClassID] = 1; 6026 else 6027 RegUsage[ClassID] += 1; 6028 } 6029 } else { 6030 collectUniformsAndScalars(VFs[j]); 6031 for (auto Inst : OpenIntervals) { 6032 // Skip ignored values for VF > 1. 
6033 if (VecValuesToIgnore.count(Inst)) 6034 continue; 6035 if (isScalarAfterVectorization(Inst, VFs[j])) { 6036 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6037 if (RegUsage.find(ClassID) == RegUsage.end()) 6038 RegUsage[ClassID] = 1; 6039 else 6040 RegUsage[ClassID] += 1; 6041 } else { 6042 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6043 if (RegUsage.find(ClassID) == RegUsage.end()) 6044 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6045 else 6046 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6047 } 6048 } 6049 } 6050 6051 for (auto& pair : RegUsage) { 6052 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6053 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6054 else 6055 MaxUsages[j][pair.first] = pair.second; 6056 } 6057 } 6058 6059 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6060 << OpenIntervals.size() << '\n'); 6061 6062 // Add the current instruction to the list of open intervals. 6063 OpenIntervals.insert(I); 6064 } 6065 6066 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6067 SmallMapVector<unsigned, unsigned, 4> Invariant; 6068 6069 for (auto Inst : LoopInvariants) { 6070 unsigned Usage = 6071 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6072 unsigned ClassID = 6073 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6074 if (Invariant.find(ClassID) == Invariant.end()) 6075 Invariant[ClassID] = Usage; 6076 else 6077 Invariant[ClassID] += Usage; 6078 } 6079 6080 LLVM_DEBUG({ 6081 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6082 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6083 << " item\n"; 6084 for (const auto &pair : MaxUsages[i]) { 6085 dbgs() << "LV(REG): RegisterClass: " 6086 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6087 << " registers\n"; 6088 } 6089 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6090 << " item\n"; 6091 for (const auto &pair : Invariant) { 6092 dbgs() << "LV(REG): RegisterClass: " 6093 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6094 << " registers\n"; 6095 } 6096 }); 6097 6098 RU.LoopInvariantRegs = Invariant; 6099 RU.MaxLocalUsers = MaxUsages[i]; 6100 RUs[i] = RU; 6101 } 6102 6103 return RUs; 6104 } 6105 6106 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6107 ElementCount VF) { 6108 // TODO: Cost model for emulated masked load/store is completely 6109 // broken. This hack guides the cost model to use an artificially 6110 // high enough value to practically disable vectorization with such 6111 // operations, except where previously deployed legality hack allowed 6112 // using very low cost values. This is to avoid regressions coming simply 6113 // from moving "masked load/store" check from legality to cost model. 6114 // Masked Load/Gather emulation was previously never allowed. 6115 // Limited number of Masked Store/Scatter emulation was allowed. 6116 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6117 return isa<LoadInst>(I) || 6118 (isa<StoreInst>(I) && 6119 NumPredStores > NumberOfStoresToPredicate); 6120 } 6121 6122 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6123 // If we aren't vectorizing the loop, or if we've already collected the 6124 // instructions to scalarize, there's nothing to do. 
Collection may already 6125 // have occurred if we have a user-selected VF and are now computing the 6126 // expected cost for interleaving. 6127 if (VF.isScalar() || VF.isZero() || 6128 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6129 return; 6130 6131 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6132 // not profitable to scalarize any instructions, the presence of VF in the 6133 // map will indicate that we've analyzed it already. 6134 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6135 6136 // Find all the instructions that are scalar with predication in the loop and 6137 // determine if it would be better to not if-convert the blocks they are in. 6138 // If so, we also record the instructions to scalarize. 6139 for (BasicBlock *BB : TheLoop->blocks()) { 6140 if (!blockNeedsPredicationForAnyReason(BB)) 6141 continue; 6142 for (Instruction &I : *BB) 6143 if (isScalarWithPredication(&I, VF)) { 6144 ScalarCostsTy ScalarCosts; 6145 // Do not apply discount if scalable, because that would lead to 6146 // invalid scalarization costs. 6147 // Do not apply discount logic if hacked cost is needed 6148 // for emulated masked memrefs. 6149 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) && 6150 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6151 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6152 // Remember that BB will remain after vectorization. 6153 PredicatedBBsAfterVectorization.insert(BB); 6154 } 6155 } 6156 } 6157 6158 int LoopVectorizationCostModel::computePredInstDiscount( 6159 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6160 assert(!isUniformAfterVectorization(PredInst, VF) && 6161 "Instruction marked uniform-after-vectorization will be predicated"); 6162 6163 // Initialize the discount to zero, meaning that the scalar version and the 6164 // vector version cost the same. 6165 InstructionCost Discount = 0; 6166 6167 // Holds instructions to analyze. The instructions we visit are mapped in 6168 // ScalarCosts. Those instructions are the ones that would be scalarized if 6169 // we find that the scalar version costs less. 6170 SmallVector<Instruction *, 8> Worklist; 6171 6172 // Returns true if the given instruction can be scalarized. 6173 auto canBeScalarized = [&](Instruction *I) -> bool { 6174 // We only attempt to scalarize instructions forming a single-use chain 6175 // from the original predicated block that would otherwise be vectorized. 6176 // Although not strictly necessary, we give up on instructions we know will 6177 // already be scalar to avoid traversing chains that are unlikely to be 6178 // beneficial. 6179 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6180 isScalarAfterVectorization(I, VF)) 6181 return false; 6182 6183 // If the instruction is scalar with predication, it will be analyzed 6184 // separately. We ignore it within the context of PredInst. 6185 if (isScalarWithPredication(I, VF)) 6186 return false; 6187 6188 // If any of the instruction's operands are uniform after vectorization, 6189 // the instruction cannot be scalarized. This prevents, for example, a 6190 // masked load from being scalarized. 6191 // 6192 // We assume we will only emit a value for lane zero of an instruction 6193 // marked uniform after vectorization, rather than VF identical values. 6194 // Thus, if we scalarize an instruction that uses a uniform, we would 6195 // create uses of values corresponding to the lanes we aren't emitting code 6196 // for. 
This behavior can be changed by allowing getScalarValue to clone 6197 // the lane zero values for uniforms rather than asserting. 6198 for (Use &U : I->operands()) 6199 if (auto *J = dyn_cast<Instruction>(U.get())) 6200 if (isUniformAfterVectorization(J, VF)) 6201 return false; 6202 6203 // Otherwise, we can scalarize the instruction. 6204 return true; 6205 }; 6206 6207 // Compute the expected cost discount from scalarizing the entire expression 6208 // feeding the predicated instruction. We currently only consider expressions 6209 // that are single-use instruction chains. 6210 Worklist.push_back(PredInst); 6211 while (!Worklist.empty()) { 6212 Instruction *I = Worklist.pop_back_val(); 6213 6214 // If we've already analyzed the instruction, there's nothing to do. 6215 if (ScalarCosts.find(I) != ScalarCosts.end()) 6216 continue; 6217 6218 // Compute the cost of the vector instruction. Note that this cost already 6219 // includes the scalarization overhead of the predicated instruction. 6220 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6221 6222 // Compute the cost of the scalarized instruction. This cost is the cost of 6223 // the instruction as if it wasn't if-converted and instead remained in the 6224 // predicated block. We will scale this cost by block probability after 6225 // computing the scalarization overhead. 6226 InstructionCost ScalarCost = 6227 VF.getFixedValue() * 6228 getInstructionCost(I, ElementCount::getFixed(1)).first; 6229 6230 // Compute the scalarization overhead of needed insertelement instructions 6231 // and phi nodes. 6232 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6233 ScalarCost += TTI.getScalarizationOverhead( 6234 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6235 APInt::getAllOnes(VF.getFixedValue()), true, false); 6236 ScalarCost += 6237 VF.getFixedValue() * 6238 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6239 } 6240 6241 // Compute the scalarization overhead of needed extractelement 6242 // instructions. For each of the instruction's operands, if the operand can 6243 // be scalarized, add it to the worklist; otherwise, account for the 6244 // overhead. 6245 for (Use &U : I->operands()) 6246 if (auto *J = dyn_cast<Instruction>(U.get())) { 6247 assert(VectorType::isValidElementType(J->getType()) && 6248 "Instruction has non-scalar type"); 6249 if (canBeScalarized(J)) 6250 Worklist.push_back(J); 6251 else if (needsExtract(J, VF)) { 6252 ScalarCost += TTI.getScalarizationOverhead( 6253 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6254 APInt::getAllOnes(VF.getFixedValue()), false, true); 6255 } 6256 } 6257 6258 // Scale the total scalar cost by block probability. 6259 ScalarCost /= getReciprocalPredBlockProb(); 6260 6261 // Compute the discount. A non-negative discount means the vector version 6262 // of the instruction costs more, and scalarizing would be beneficial. 6263 Discount += VectorCost - ScalarCost; 6264 ScalarCosts[I] = ScalarCost; 6265 } 6266 6267 return *Discount.getValue(); 6268 } 6269 6270 LoopVectorizationCostModel::VectorizationCostTy 6271 LoopVectorizationCostModel::expectedCost( 6272 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6273 VectorizationCostTy Cost; 6274 6275 // For each block. 6276 for (BasicBlock *BB : TheLoop->blocks()) { 6277 VectorizationCostTy BlockCost; 6278 6279 // For each instruction in the old loop. 6280 for (Instruction &I : BB->instructionsWithoutDebug()) { 6281 // Skip ignored values. 
6282 if (ValuesToIgnore.count(&I) || 6283 (VF.isVector() && VecValuesToIgnore.count(&I))) 6284 continue; 6285 6286 VectorizationCostTy C = getInstructionCost(&I, VF); 6287 6288 // Check if we should override the cost. 6289 if (C.first.isValid() && 6290 ForceTargetInstructionCost.getNumOccurrences() > 0) 6291 C.first = InstructionCost(ForceTargetInstructionCost); 6292 6293 // Keep a list of instructions with invalid costs. 6294 if (Invalid && !C.first.isValid()) 6295 Invalid->emplace_back(&I, VF); 6296 6297 BlockCost.first += C.first; 6298 BlockCost.second |= C.second; 6299 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6300 << " for VF " << VF << " For instruction: " << I 6301 << '\n'); 6302 } 6303 6304 // If we are vectorizing a predicated block, it will have been 6305 // if-converted. This means that the block's instructions (aside from 6306 // stores and instructions that may divide by zero) will now be 6307 // unconditionally executed. For the scalar case, we may not always execute 6308 // the predicated block, if it is an if-else block. Thus, scale the block's 6309 // cost by the probability of executing it. blockNeedsPredication from 6310 // Legal is used so as to not include all blocks in tail folded loops. 6311 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6312 BlockCost.first /= getReciprocalPredBlockProb(); 6313 6314 Cost.first += BlockCost.first; 6315 Cost.second |= BlockCost.second; 6316 } 6317 6318 return Cost; 6319 } 6320 6321 /// Gets Address Access SCEV after verifying that the access pattern 6322 /// is loop invariant except the induction variable dependence. 6323 /// 6324 /// This SCEV can be sent to the Target in order to estimate the address 6325 /// calculation cost. 6326 static const SCEV *getAddressAccessSCEV( 6327 Value *Ptr, 6328 LoopVectorizationLegality *Legal, 6329 PredicatedScalarEvolution &PSE, 6330 const Loop *TheLoop) { 6331 6332 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6333 if (!Gep) 6334 return nullptr; 6335 6336 // We are looking for a gep with all loop invariant indices except for one 6337 // which should be an induction variable. 6338 auto SE = PSE.getSE(); 6339 unsigned NumOperands = Gep->getNumOperands(); 6340 for (unsigned i = 1; i < NumOperands; ++i) { 6341 Value *Opd = Gep->getOperand(i); 6342 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6343 !Legal->isInductionVariable(Opd)) 6344 return nullptr; 6345 } 6346 6347 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6348 return PSE.getSCEV(Ptr); 6349 } 6350 6351 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6352 return Legal->hasStride(I->getOperand(0)) || 6353 Legal->hasStride(I->getOperand(1)); 6354 } 6355 6356 InstructionCost 6357 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6358 ElementCount VF) { 6359 assert(VF.isVector() && 6360 "Scalarization cost of instruction implies vectorization."); 6361 if (VF.isScalable()) 6362 return InstructionCost::getInvalid(); 6363 6364 Type *ValTy = getLoadStoreType(I); 6365 auto SE = PSE.getSE(); 6366 6367 unsigned AS = getLoadStoreAddressSpace(I); 6368 Value *Ptr = getLoadStorePointerOperand(I); 6369 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6370 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6371 // that it is being called from this specific place. 
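  // In outline, the scalarized cost computed below is, per scalar lane: an
  // address computation plus a scalar memory operation, plus the
  // insert/extract overhead of moving lanes in and out of vectors; for
  // predicated accesses the total is then scaled by the probability of the
  // block executing.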
6372 6373 // Figure out whether the access is strided and get the stride value 6374 // if it's known in compile time 6375 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6376 6377 // Get the cost of the scalar memory instruction and address computation. 6378 InstructionCost Cost = 6379 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6380 6381 // Don't pass *I here, since it is scalar but will actually be part of a 6382 // vectorized loop where the user of it is a vectorized instruction. 6383 const Align Alignment = getLoadStoreAlignment(I); 6384 Cost += VF.getKnownMinValue() * 6385 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6386 AS, TTI::TCK_RecipThroughput); 6387 6388 // Get the overhead of the extractelement and insertelement instructions 6389 // we might create due to scalarization. 6390 Cost += getScalarizationOverhead(I, VF); 6391 6392 // If we have a predicated load/store, it will need extra i1 extracts and 6393 // conditional branches, but may not be executed for each vector lane. Scale 6394 // the cost by the probability of executing the predicated block. 6395 if (isPredicatedInst(I, VF)) { 6396 Cost /= getReciprocalPredBlockProb(); 6397 6398 // Add the cost of an i1 extract and a branch 6399 auto *Vec_i1Ty = 6400 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6401 Cost += TTI.getScalarizationOverhead( 6402 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6403 /*Insert=*/false, /*Extract=*/true); 6404 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6405 6406 if (useEmulatedMaskMemRefHack(I, VF)) 6407 // Artificially setting to a high enough value to practically disable 6408 // vectorization with such operations. 6409 Cost = 3000000; 6410 } 6411 6412 return Cost; 6413 } 6414 6415 InstructionCost 6416 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6417 ElementCount VF) { 6418 Type *ValTy = getLoadStoreType(I); 6419 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6420 Value *Ptr = getLoadStorePointerOperand(I); 6421 unsigned AS = getLoadStoreAddressSpace(I); 6422 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6423 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6424 6425 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6426 "Stride should be 1 or -1 for consecutive memory access"); 6427 const Align Alignment = getLoadStoreAlignment(I); 6428 InstructionCost Cost = 0; 6429 if (Legal->isMaskRequired(I)) 6430 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6431 CostKind); 6432 else 6433 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6434 CostKind, I); 6435 6436 bool Reverse = ConsecutiveStride < 0; 6437 if (Reverse) 6438 Cost += 6439 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6440 return Cost; 6441 } 6442 6443 InstructionCost 6444 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6445 ElementCount VF) { 6446 assert(Legal->isUniformMemOp(*I)); 6447 6448 Type *ValTy = getLoadStoreType(I); 6449 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6450 const Align Alignment = getLoadStoreAlignment(I); 6451 unsigned AS = getLoadStoreAddressSpace(I); 6452 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6453 if (isa<LoadInst>(I)) { 6454 return TTI.getAddressComputationCost(ValTy) + 6455 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6456 CostKind) + 6457 
TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6458 } 6459 StoreInst *SI = cast<StoreInst>(I); 6460 6461 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6462 return TTI.getAddressComputationCost(ValTy) + 6463 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6464 CostKind) + 6465 (isLoopInvariantStoreValue 6466 ? 0 6467 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6468 VF.getKnownMinValue() - 1)); 6469 } 6470 6471 InstructionCost 6472 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6473 ElementCount VF) { 6474 Type *ValTy = getLoadStoreType(I); 6475 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6476 const Align Alignment = getLoadStoreAlignment(I); 6477 const Value *Ptr = getLoadStorePointerOperand(I); 6478 6479 return TTI.getAddressComputationCost(VectorTy) + 6480 TTI.getGatherScatterOpCost( 6481 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6482 TargetTransformInfo::TCK_RecipThroughput, I); 6483 } 6484 6485 InstructionCost 6486 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6487 ElementCount VF) { 6488 // TODO: Once we have support for interleaving with scalable vectors 6489 // we can calculate the cost properly here. 6490 if (VF.isScalable()) 6491 return InstructionCost::getInvalid(); 6492 6493 Type *ValTy = getLoadStoreType(I); 6494 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6495 unsigned AS = getLoadStoreAddressSpace(I); 6496 6497 auto Group = getInterleavedAccessGroup(I); 6498 assert(Group && "Fail to get an interleaved access group."); 6499 6500 unsigned InterleaveFactor = Group->getFactor(); 6501 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6502 6503 // Holds the indices of existing members in the interleaved group. 6504 SmallVector<unsigned, 4> Indices; 6505 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6506 if (Group->getMember(IF)) 6507 Indices.push_back(IF); 6508 6509 // Calculate the cost of the whole interleaved group. 6510 bool UseMaskForGaps = 6511 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6512 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6513 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6514 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6515 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6516 6517 if (Group->isReverse()) { 6518 // TODO: Add support for reversed masked interleaved access. 6519 assert(!Legal->isMaskRequired(I) && 6520 "Reverse masked interleaved access not supported."); 6521 Cost += 6522 Group->getNumMembers() * 6523 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6524 } 6525 return Cost; 6526 } 6527 6528 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6529 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6530 using namespace llvm::PatternMatch; 6531 // Early exit for no inloop reductions 6532 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6533 return None; 6534 auto *VectorTy = cast<VectorType>(Ty); 6535 6536 // We are looking for a pattern of, and finding the minimal acceptable cost: 6537 // reduce(mul(ext(A), ext(B))) or 6538 // reduce(mul(A, B)) or 6539 // reduce(ext(A)) or 6540 // reduce(A). 6541 // The basic idea is that we walk down the tree to do that, finding the root 6542 // reduction instruction in InLoopReductionImmediateChains. 
From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower, then we return it for
  // the reduction instruction and 0 for the other instructions in the pattern.
  // If it is not, we return an invalid cost specifying that the original cost
  // method should be used.
  Instruction *RetI = I;
  if (match(RetI, m_ZExtOrSExt(m_Value()))) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }
  if (match(RetI, m_Mul(m_Value(), m_Value())) &&
      RetI->user_back()->getOpcode() == Instruction::Add) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }

  // Test if the found instruction is a reduction, and if not return an invalid
  // cost specifying the parent to use the original cost modelling.
  if (!InLoopReductionImmediateChains.count(RetI))
    return None;

  // Find the reduction this chain is a part of and calculate the basic cost of
  // the reduction on its own.
  Instruction *LastChain = InLoopReductionImmediateChains[RetI];
  Instruction *ReductionPhi = LastChain;
  while (!isa<PHINode>(ReductionPhi))
    ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];

  const RecurrenceDescriptor &RdxDesc =
      Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;

  InstructionCost BaseCost = TTI.getArithmeticReductionCost(
      RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);

  // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
  // normal fmul instruction to the cost of the fadd reduction.
  if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
    BaseCost +=
        TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);

  // If we're using ordered reductions then we can just return the base cost
  // here, since getArithmeticReductionCost calculates the full ordered
  // reduction cost when FP reassociation is not allowed.
  if (useOrderedReductions(RdxDesc))
    return BaseCost;

  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
  Instruction *RedOp = RetI->getOperand(1) == LastChain
                           ? dyn_cast<Instruction>(RetI->getOperand(0))
                           : dyn_cast<Instruction>(RetI->getOperand(1));

  VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);

  Instruction *Op0, *Op1;
  if (RedOp &&
      match(RedOp,
            m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
      match(Op0, m_ZExtOrSExt(m_Value())) &&
      Op0->getOpcode() == Op1->getOpcode() &&
      Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
      !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
      (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {

    // Matched reduce(ext(mul(ext(A), ext(B))))
    // Note that the extend opcodes need to all match, or if A==B they will
    // have been converted to zext(mul(sext(A), sext(A))) as it is known
    // positive, which is equally fine.
6613 bool IsUnsigned = isa<ZExtInst>(Op0); 6614 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6615 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6616 6617 InstructionCost ExtCost = 6618 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6619 TTI::CastContextHint::None, CostKind, Op0); 6620 InstructionCost MulCost = 6621 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6622 InstructionCost Ext2Cost = 6623 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6624 TTI::CastContextHint::None, CostKind, RedOp); 6625 6626 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6627 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6628 CostKind); 6629 6630 if (RedCost.isValid() && 6631 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6632 return I == RetI ? RedCost : 0; 6633 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6634 !TheLoop->isLoopInvariant(RedOp)) { 6635 // Matched reduce(ext(A)) 6636 bool IsUnsigned = isa<ZExtInst>(RedOp); 6637 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6638 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6639 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6640 CostKind); 6641 6642 InstructionCost ExtCost = 6643 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6644 TTI::CastContextHint::None, CostKind, RedOp); 6645 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6646 return I == RetI ? RedCost : 0; 6647 } else if (RedOp && 6648 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6649 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6650 Op0->getOpcode() == Op1->getOpcode() && 6651 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6652 bool IsUnsigned = isa<ZExtInst>(Op0); 6653 Type *Op0Ty = Op0->getOperand(0)->getType(); 6654 Type *Op1Ty = Op1->getOperand(0)->getType(); 6655 Type *LargestOpTy = 6656 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6657 : Op0Ty; 6658 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6659 6660 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6661 // different sizes. We take the largest type as the ext to reduce, and add 6662 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6663 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6664 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6665 TTI::CastContextHint::None, CostKind, Op0); 6666 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6667 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6668 TTI::CastContextHint::None, CostKind, Op1); 6669 InstructionCost MulCost = 6670 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6671 6672 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6673 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6674 CostKind); 6675 InstructionCost ExtraExtCost = 0; 6676 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6677 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6678 ExtraExtCost = TTI.getCastInstrCost( 6679 ExtraExtOp->getOpcode(), ExtType, 6680 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6681 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6682 } 6683 6684 if (RedCost.isValid() && 6685 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6686 return I == RetI ? 
RedCost : 0; 6687 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6688 // Matched reduce(mul()) 6689 InstructionCost MulCost = 6690 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6691 6692 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6693 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6694 CostKind); 6695 6696 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6697 return I == RetI ? RedCost : 0; 6698 } 6699 } 6700 6701 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6702 } 6703 6704 InstructionCost 6705 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6706 ElementCount VF) { 6707 // Calculate scalar cost only. Vectorization cost should be ready at this 6708 // moment. 6709 if (VF.isScalar()) { 6710 Type *ValTy = getLoadStoreType(I); 6711 const Align Alignment = getLoadStoreAlignment(I); 6712 unsigned AS = getLoadStoreAddressSpace(I); 6713 6714 return TTI.getAddressComputationCost(ValTy) + 6715 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6716 TTI::TCK_RecipThroughput, I); 6717 } 6718 return getWideningCost(I, VF); 6719 } 6720 6721 LoopVectorizationCostModel::VectorizationCostTy 6722 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6723 ElementCount VF) { 6724 // If we know that this instruction will remain uniform, check the cost of 6725 // the scalar version. 6726 if (isUniformAfterVectorization(I, VF)) 6727 VF = ElementCount::getFixed(1); 6728 6729 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6730 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6731 6732 // Forced scalars do not have any scalarization overhead. 6733 auto ForcedScalar = ForcedScalars.find(VF); 6734 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6735 auto InstSet = ForcedScalar->second; 6736 if (InstSet.count(I)) 6737 return VectorizationCostTy( 6738 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6739 VF.getKnownMinValue()), 6740 false); 6741 } 6742 6743 Type *VectorTy; 6744 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6745 6746 bool TypeNotScalarized = false; 6747 if (VF.isVector() && VectorTy->isVectorTy()) { 6748 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 6749 if (NumParts) 6750 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6751 else 6752 C = InstructionCost::getInvalid(); 6753 } 6754 return VectorizationCostTy(C, TypeNotScalarized); 6755 } 6756 6757 InstructionCost 6758 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6759 ElementCount VF) const { 6760 6761 // There is no mechanism yet to create a scalable scalarization loop, 6762 // so this is currently Invalid. 6763 if (VF.isScalable()) 6764 return InstructionCost::getInvalid(); 6765 6766 if (VF.isScalar()) 6767 return 0; 6768 6769 InstructionCost Cost = 0; 6770 Type *RetTy = ToVectorTy(I->getType(), VF); 6771 if (!RetTy->isVoidTy() && 6772 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6773 Cost += TTI.getScalarizationOverhead( 6774 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6775 false); 6776 6777 // Some targets keep addresses scalar. 6778 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6779 return Cost; 6780 6781 // Some targets support efficient element stores. 6782 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6783 return Cost; 6784 6785 // Collect operands to consider. 6786 CallInst *CI = dyn_cast<CallInst>(I); 6787 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 6788 6789 // Skip operands that do not require extraction/scalarization and do not incur 6790 // any overhead. 6791 SmallVector<Type *> Tys; 6792 for (auto *V : filterExtractingOperands(Ops, VF)) 6793 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6794 return Cost + TTI.getOperandsScalarizationOverhead( 6795 filterExtractingOperands(Ops, VF), Tys); 6796 } 6797 6798 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6799 if (VF.isScalar()) 6800 return; 6801 NumPredStores = 0; 6802 for (BasicBlock *BB : TheLoop->blocks()) { 6803 // For each instruction in the old loop. 6804 for (Instruction &I : *BB) { 6805 Value *Ptr = getLoadStorePointerOperand(&I); 6806 if (!Ptr) 6807 continue; 6808 6809 // TODO: We should generate better code and update the cost model for 6810 // predicated uniform stores. Today they are treated as any other 6811 // predicated store (see added test cases in 6812 // invariant-store-vectorization.ll). 6813 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6814 NumPredStores++; 6815 6816 if (Legal->isUniformMemOp(I)) { 6817 // TODO: Avoid replicating loads and stores instead of 6818 // relying on instcombine to remove them. 6819 // Load: Scalar load + broadcast 6820 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6821 InstructionCost Cost; 6822 if (isa<StoreInst>(&I) && VF.isScalable() && 6823 isLegalGatherOrScatter(&I, VF)) { 6824 Cost = getGatherScatterCost(&I, VF); 6825 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6826 } else { 6827 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 6828 "Cannot yet scalarize uniform stores"); 6829 Cost = getUniformMemOpCost(&I, VF); 6830 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6831 } 6832 continue; 6833 } 6834 6835 // We assume that widening is the best solution when possible. 6836 if (memoryInstructionCanBeWidened(&I, VF)) { 6837 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6838 int ConsecutiveStride = Legal->isConsecutivePtr( 6839 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6840 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6841 "Expected consecutive stride."); 6842 InstWidening Decision = 6843 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6844 setWideningDecision(&I, VF, Decision, Cost); 6845 continue; 6846 } 6847 6848 // Choose between Interleaving, Gather/Scatter or Scalarization. 6849 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6850 unsigned NumAccesses = 1; 6851 if (isAccessInterleaved(&I)) { 6852 auto Group = getInterleavedAccessGroup(&I); 6853 assert(Group && "Fail to get an interleaved access group."); 6854 6855 // Make one decision for the whole group. 6856 if (getWideningDecision(&I, VF) != CM_Unknown) 6857 continue; 6858 6859 NumAccesses = Group->getNumMembers(); 6860 if (interleavedAccessCanBeWidened(&I, VF)) 6861 InterleaveCost = getInterleaveGroupCost(&I, VF); 6862 } 6863 6864 InstructionCost GatherScatterCost = 6865 isLegalGatherOrScatter(&I, VF) 6866 ? getGatherScatterCost(&I, VF) * NumAccesses 6867 : InstructionCost::getInvalid(); 6868 6869 InstructionCost ScalarizationCost = 6870 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6871 6872 // Choose better solution for the current VF, 6873 // write down this decision and use it during vectorization. 
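      // Note the tie-breaking below: interleaving is preferred when it ties
      // with gather/scatter, and gather/scatter must be strictly cheaper than
      // scalarization to be chosen.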
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and gets a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void)hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // it means we don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return cost for branches around scalarized and predicated blocks.
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (
          TTI.getScalarizationOverhead(
              Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
          (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
7033 return 0; 7034 // Note: We currently assume zero cost for an unconditional branch inside 7035 // a predicated block since it will become a fall-through, although we 7036 // may decide in the future to call TTI for all branches. 7037 } 7038 case Instruction::PHI: { 7039 auto *Phi = cast<PHINode>(I); 7040 7041 // First-order recurrences are replaced by vector shuffles inside the loop. 7042 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7043 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7044 return TTI.getShuffleCost( 7045 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7046 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7047 7048 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7049 // converted into select instructions. We require N - 1 selects per phi 7050 // node, where N is the number of incoming values. 7051 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7052 return (Phi->getNumIncomingValues() - 1) * 7053 TTI.getCmpSelInstrCost( 7054 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7055 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7056 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7057 7058 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7059 } 7060 case Instruction::UDiv: 7061 case Instruction::SDiv: 7062 case Instruction::URem: 7063 case Instruction::SRem: 7064 // If we have a predicated instruction, it may not be executed for each 7065 // vector lane. Get the scalarization cost and scale this amount by the 7066 // probability of executing the predicated block. If the instruction is not 7067 // predicated, we fall through to the next case. 7068 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7069 InstructionCost Cost = 0; 7070 7071 // These instructions have a non-void type, so account for the phi nodes 7072 // that we will create. This cost is likely to be zero. The phi node 7073 // cost, if any, should be scaled by the block probability because it 7074 // models a copy at the end of each predicated block. 7075 Cost += VF.getKnownMinValue() * 7076 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7077 7078 // The cost of the non-predicated instruction. 7079 Cost += VF.getKnownMinValue() * 7080 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7081 7082 // The cost of insertelement and extractelement instructions needed for 7083 // scalarization. 7084 Cost += getScalarizationOverhead(I, VF); 7085 7086 // Scale the cost by the probability of executing the predicated blocks. 7087 // This assumes the predicated block for each vector lane is equally 7088 // likely. 7089 return Cost / getReciprocalPredBlockProb(); 7090 } 7091 LLVM_FALLTHROUGH; 7092 case Instruction::Add: 7093 case Instruction::FAdd: 7094 case Instruction::Sub: 7095 case Instruction::FSub: 7096 case Instruction::Mul: 7097 case Instruction::FMul: 7098 case Instruction::FDiv: 7099 case Instruction::FRem: 7100 case Instruction::Shl: 7101 case Instruction::LShr: 7102 case Instruction::AShr: 7103 case Instruction::And: 7104 case Instruction::Or: 7105 case Instruction::Xor: { 7106 // Since we will replace the stride by 1 the multiplication should go away. 
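    // Illustrative example (hypothetical IR): for an address computation like
    //   %mul = mul i64 %i, %stride
    //   %gep = getelementptr inbounds i32, i32* %A, i64 %mul
    // where the loop is versioned on the runtime condition %stride == 1, the
    // multiply folds away in the vectorized version, so it is costed as free
    // below.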
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    Value *Op2 = I->getOperand(1);
    TargetTransformInfo::OperandValueProperties Op2VP;
    TargetTransformInfo::OperandValueKind Op2VK =
        TTI.getOperandInfo(Op2, Op2VP);
    if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
      Op2VK = TargetTransformInfo::OK_UniformValue;

    SmallVector<const Value *, 4> Operands(I->operand_values());
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
  }
  case Instruction::FNeg: {
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
        TargetTransformInfo::OP_None, I->getOperand(0), I);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));

    const Value *Op0, *Op1;
    using namespace llvm::PatternMatch;
    if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
                        match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
      // select x, y, false --> x & y
      // select x, true, y --> x | y
      TTI::OperandValueProperties Op1VP = TTI::OP_None;
      TTI::OperandValueProperties Op2VP = TTI::OP_None;
      TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
      TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
      assert(Op0->getType()->getScalarSizeInBits() == 1 &&
             Op1->getType()->getScalarSizeInBits() == 1);

      SmallVector<const Value *, 2> Operands{Op0, Op1};
      return TTI.getArithmeticInstrCost(
          match(I, m_LogicalOr()) ?
Instruction::Or : Instruction::And, VectorTy, 7155 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7156 } 7157 7158 Type *CondTy = SI->getCondition()->getType(); 7159 if (!ScalarCond) 7160 CondTy = VectorType::get(CondTy, VF); 7161 7162 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7163 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7164 Pred = Cmp->getPredicate(); 7165 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7166 CostKind, I); 7167 } 7168 case Instruction::ICmp: 7169 case Instruction::FCmp: { 7170 Type *ValTy = I->getOperand(0)->getType(); 7171 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7172 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7173 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7174 VectorTy = ToVectorTy(ValTy, VF); 7175 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7176 cast<CmpInst>(I)->getPredicate(), CostKind, 7177 I); 7178 } 7179 case Instruction::Store: 7180 case Instruction::Load: { 7181 ElementCount Width = VF; 7182 if (Width.isVector()) { 7183 InstWidening Decision = getWideningDecision(I, Width); 7184 assert(Decision != CM_Unknown && 7185 "CM decision should be taken at this point"); 7186 if (Decision == CM_Scalarize) 7187 Width = ElementCount::getFixed(1); 7188 } 7189 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7190 return getMemoryInstructionCost(I, VF); 7191 } 7192 case Instruction::BitCast: 7193 if (I->getType()->isPointerTy()) 7194 return 0; 7195 LLVM_FALLTHROUGH; 7196 case Instruction::ZExt: 7197 case Instruction::SExt: 7198 case Instruction::FPToUI: 7199 case Instruction::FPToSI: 7200 case Instruction::FPExt: 7201 case Instruction::PtrToInt: 7202 case Instruction::IntToPtr: 7203 case Instruction::SIToFP: 7204 case Instruction::UIToFP: 7205 case Instruction::Trunc: 7206 case Instruction::FPTrunc: { 7207 // Computes the CastContextHint from a Load/Store instruction. 7208 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7209 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7210 "Expected a load or a store!"); 7211 7212 if (VF.isScalar() || !TheLoop->contains(I)) 7213 return TTI::CastContextHint::Normal; 7214 7215 switch (getWideningDecision(I, VF)) { 7216 case LoopVectorizationCostModel::CM_GatherScatter: 7217 return TTI::CastContextHint::GatherScatter; 7218 case LoopVectorizationCostModel::CM_Interleave: 7219 return TTI::CastContextHint::Interleave; 7220 case LoopVectorizationCostModel::CM_Scalarize: 7221 case LoopVectorizationCostModel::CM_Widen: 7222 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7223 : TTI::CastContextHint::Normal; 7224 case LoopVectorizationCostModel::CM_Widen_Reverse: 7225 return TTI::CastContextHint::Reversed; 7226 case LoopVectorizationCostModel::CM_Unknown: 7227 llvm_unreachable("Instr did not go through cost modelling?"); 7228 } 7229 7230 llvm_unreachable("Unhandled case!"); 7231 }; 7232 7233 unsigned Opcode = I->getOpcode(); 7234 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7235 // For Trunc, the context is the only user, which must be a StoreInst. 7236 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7237 if (I->hasOneUse()) 7238 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7239 CCH = ComputeCCH(Store); 7240 } 7241 // For Z/Sext, the context is the operand, which must be a LoadInst. 
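    // Illustrative example (hypothetical IR): for
    //   %v = load i8, i8* %p
    //   %e = zext i8 %v to i32
    // the widening decision already taken for the load determines the hint
    // via ComputeCCH above: a reversed load yields CastContextHint::Reversed,
    // a gather yields CastContextHint::GatherScatter, and so on.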
7242 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7243 Opcode == Instruction::FPExt) { 7244 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7245 CCH = ComputeCCH(Load); 7246 } 7247 7248 // We optimize the truncation of induction variables having constant 7249 // integer steps. The cost of these truncations is the same as the scalar 7250 // operation. 7251 if (isOptimizableIVTruncate(I, VF)) { 7252 auto *Trunc = cast<TruncInst>(I); 7253 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7254 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7255 } 7256 7257 // Detect reduction patterns 7258 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7259 return *RedCost; 7260 7261 Type *SrcScalarTy = I->getOperand(0)->getType(); 7262 Type *SrcVecTy = 7263 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7264 if (canTruncateToMinimalBitwidth(I, VF)) { 7265 // This cast is going to be shrunk. This may remove the cast or it might 7266 // turn it into slightly different cast. For example, if MinBW == 16, 7267 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7268 // 7269 // Calculate the modified src and dest types. 7270 Type *MinVecTy = VectorTy; 7271 if (Opcode == Instruction::Trunc) { 7272 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7273 VectorTy = 7274 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7275 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7276 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7277 VectorTy = 7278 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7279 } 7280 } 7281 7282 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7283 } 7284 case Instruction::Call: { 7285 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7286 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7287 return *RedCost; 7288 bool NeedToScalarize; 7289 CallInst *CI = cast<CallInst>(I); 7290 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7291 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7292 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7293 return std::min(CallCost, IntrinsicCost); 7294 } 7295 return CallCost; 7296 } 7297 case Instruction::ExtractValue: 7298 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7299 case Instruction::Alloca: 7300 // We cannot easily widen alloca to a scalable alloca, as 7301 // the result would need to be a vector of pointers. 7302 if (VF.isScalable()) 7303 return InstructionCost::getInvalid(); 7304 LLVM_FALLTHROUGH; 7305 default: 7306 // This opcode is unknown. Assume that it is the same as 'mul'. 7307 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7308 } // end of switch. 
7309 } 7310 7311 char LoopVectorize::ID = 0; 7312 7313 static const char lv_name[] = "Loop Vectorization"; 7314 7315 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7316 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7317 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7318 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7319 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7320 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7321 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7322 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7323 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7324 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7325 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7326 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7327 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7328 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7329 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7330 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7331 7332 namespace llvm { 7333 7334 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7335 7336 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7337 bool VectorizeOnlyWhenForced) { 7338 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7339 } 7340 7341 } // end namespace llvm 7342 7343 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7344 // Check if the pointer operand of a load or store instruction is 7345 // consecutive. 7346 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7347 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7348 return false; 7349 } 7350 7351 void LoopVectorizationCostModel::collectValuesToIgnore() { 7352 // Ignore ephemeral values. 7353 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7354 7355 // Ignore type-promoting instructions we identified during reduction 7356 // detection. 7357 for (auto &Reduction : Legal->getReductionVars()) { 7358 const RecurrenceDescriptor &RedDes = Reduction.second; 7359 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7360 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7361 } 7362 // Ignore type-casting instructions we identified during induction 7363 // detection. 7364 for (auto &Induction : Legal->getInductionVars()) { 7365 const InductionDescriptor &IndDes = Induction.second; 7366 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7367 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7368 } 7369 } 7370 7371 void LoopVectorizationCostModel::collectInLoopReductions() { 7372 for (auto &Reduction : Legal->getReductionVars()) { 7373 PHINode *Phi = Reduction.first; 7374 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7375 7376 // We don't collect reductions that are type promoted (yet). 7377 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7378 continue; 7379 7380 // If the target would prefer this reduction to happen "in-loop", then we 7381 // want to record it as such. 7382 unsigned Opcode = RdxDesc.getOpcode(); 7383 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7384 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7385 TargetTransformInfo::ReductionFlags())) 7386 continue; 7387 7388 // Check that we can correctly put the reductions into the loop, by 7389 // finding the chain of operations that leads from the phi to the loop 7390 // exit value. 
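    // Illustrative example (hypothetical IR): for an integer add reduction
    //   %rdx = phi i32 [ 0, %preheader ], [ %add, %loop ]
    //   %add = add i32 %rdx, %val
    // the chain is {%add}, and InLoopReductionImmediateChains below records
    // the mapping %add -> %rdx.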
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(determineVPlanVF(
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize(),
          CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
7473 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7474 !useMaskedInterleavedAccesses(*TTI)) { 7475 LLVM_DEBUG( 7476 dbgs() 7477 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7478 "which requires masked-interleaved support.\n"); 7479 if (CM.InterleaveInfo.invalidateGroups()) 7480 // Invalidating interleave groups also requires invalidating all decisions 7481 // based on them, which includes widening decisions and uniform and scalar 7482 // values. 7483 CM.invalidateCostModelingDecisions(); 7484 } 7485 7486 ElementCount MaxUserVF = 7487 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7488 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7489 if (!UserVF.isZero() && UserVFIsLegal) { 7490 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7491 "VF needs to be a power of two"); 7492 // Collect the instructions (and their associated costs) that will be more 7493 // profitable to scalarize. 7494 if (CM.selectUserVectorizationFactor(UserVF)) { 7495 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7496 CM.collectInLoopReductions(); 7497 buildVPlansWithVPRecipes(UserVF, UserVF); 7498 LLVM_DEBUG(printPlans(dbgs())); 7499 return {{UserVF, 0}}; 7500 } else 7501 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7502 "InvalidCost", ORE, OrigLoop); 7503 } 7504 7505 // Populate the set of Vectorization Factor Candidates. 7506 ElementCountSet VFCandidates; 7507 for (auto VF = ElementCount::getFixed(1); 7508 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7509 VFCandidates.insert(VF); 7510 for (auto VF = ElementCount::getScalable(1); 7511 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7512 VFCandidates.insert(VF); 7513 7514 for (const auto &VF : VFCandidates) { 7515 // Collect Uniform and Scalar instructions after vectorization with VF. 7516 CM.collectUniformsAndScalars(VF); 7517 7518 // Collect the instructions (and their associated costs) that will be more 7519 // profitable to scalarize. 7520 if (VF.isVector()) 7521 CM.collectInstsToScalarize(VF); 7522 } 7523 7524 CM.collectInLoopReductions(); 7525 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7526 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7527 7528 LLVM_DEBUG(printPlans(dbgs())); 7529 if (!MaxFactors.hasVector()) 7530 return VectorizationFactor::Disabled(); 7531 7532 // Select the optimal vectorization factor. 7533 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7534 7535 // Check if it is profitable to vectorize with runtime checks. 
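  // Illustrative example (hypothetical numbers): if vectorization would
  // require 200 runtime pointer checks while the applicable threshold is 128,
  // the analysis remark below is emitted and vectorization is abandoned;
  // under the threshold, the checks are considered affordable.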
  unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
  if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
                   OrigLoop->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return VectorizationFactor::Disabled();
    }
  }
  return SelectedVF;
}

VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
  assert(count_if(VPlans,
                  [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
             1 &&
         "Best VF does not have a single VPlan.");

  for (const VPlanPtr &Plan : VPlans) {
    if (Plan->hasVF(VF))
      return *Plan.get();
  }
  llvm_unreachable("No plan found!");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve the first location for a self reference to the LoopID metadata
  // node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
                                           VPlan &BestVPlan,
                                           InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');

  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
  Value *CanonicalIVStartValue;
  std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
      ILV.createVectorizedLoopSkeleton();
  ILV.collectPoisonGeneratingRecipes(State);

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
                             ILV.getOrCreateVectorTripCount(nullptr),
                             CanonicalIVStartValue, State);
  BestVPlan.execute(&State);

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});

  Loop *L = LI->getLoopFor(State.CFG.PrevBB);
  if (VectorizedLoopID.hasValue())
    L->setLoopID(VectorizedLoopID.getValue());
  else {
    if (MDNode *LID = OrigLoop->getLoopID())
      L->setLoopID(LID);

    LoopVectorizeHints Hints(L, true, *ORE);
    Hints.setAlreadyVectorized();
  }
  // Disable runtime unrolling when vectorizing the epilogue loop.
  if (CanonicalIVStartValue)
    AddRuntimeUnrollDisableMetaData(L);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  // predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop.
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // The operands of the icmp are often a dead trunc, used by IndUpdate.
    // TODO: we could recurse through operands in general.
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7714 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7715 continue; 7716 7717 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7718 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7719 })) 7720 DeadInstructions.insert(IndUpdate); 7721 } 7722 } 7723 7724 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7725 7726 //===--------------------------------------------------------------------===// 7727 // EpilogueVectorizerMainLoop 7728 //===--------------------------------------------------------------------===// 7729 7730 /// This function is partially responsible for generating the control flow 7731 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7732 std::pair<BasicBlock *, Value *> 7733 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7734 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7735 Loop *Lp = createVectorLoopSkeleton(""); 7736 7737 // Generate the code to check the minimum iteration count of the vector 7738 // epilogue (see below). 7739 EPI.EpilogueIterationCountCheck = 7740 emitMinimumIterationCountCheck(LoopScalarPreHeader, true); 7741 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7742 7743 // Generate the code to check any assumptions that we've made for SCEV 7744 // expressions. 7745 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7746 7747 // Generate the code that checks at runtime if arrays overlap. We put the 7748 // checks into a separate block to make the more common case of few elements 7749 // faster. 7750 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7751 7752 // Generate the iteration count check for the main loop, *after* the check 7753 // for the epilogue loop, so that the path-length is shorter for the case 7754 // that goes directly through the vector epilogue. The longer-path length for 7755 // the main loop is compensated for, by the gain from vectorizing the larger 7756 // trip count. Note: the branch will get updated later on when we vectorize 7757 // the epilogue. 7758 EPI.MainLoopIterationCountCheck = 7759 emitMinimumIterationCountCheck(LoopScalarPreHeader, false); 7760 7761 // Generate the induction variable. 7762 Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader); 7763 EPI.VectorTripCount = CountRoundDown; 7764 createHeaderBranch(Lp); 7765 7766 // Skip induction resume value creation here because they will be created in 7767 // the second pass. If we created them here, they wouldn't be used anyway, 7768 // because the vplan in the second pass still contains the inductions from the 7769 // original loop. 7770 7771 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7772 } 7773 7774 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7775 LLVM_DEBUG({ 7776 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7777 << "Main Loop VF:" << EPI.MainLoopVF 7778 << ", Main Loop UF:" << EPI.MainLoopUF 7779 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7780 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7781 }); 7782 } 7783 7784 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7785 DEBUG_WITH_TYPE(VerboseDebug, { 7786 dbgs() << "intermediate fn:\n" 7787 << *OrigLoop->getHeader()->getParent() << "\n"; 7788 }); 7789 } 7790 7791 BasicBlock * 7792 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass, 7793 bool ForEpilogue) { 7794 assert(Bypass && "Expected valid bypass basic block."); 7795 ElementCount VFactor = ForEpilogue ? 
EPI.EpilogueVF : VF; 7796 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7797 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7798 // Reuse existing vector loop preheader for TC checks. 7799 // Note that new preheader block is generated for vector loop. 7800 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7801 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7802 7803 // Generate code to check if the loop's trip count is less than VF * UF of the 7804 // main vector loop. 7805 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7806 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7807 7808 Value *CheckMinIters = Builder.CreateICmp( 7809 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7810 "min.iters.check"); 7811 7812 if (!ForEpilogue) 7813 TCCheckBlock->setName("vector.main.loop.iter.check"); 7814 7815 // Create new preheader for vector loop. 7816 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7817 DT, LI, nullptr, "vector.ph"); 7818 7819 if (ForEpilogue) { 7820 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7821 DT->getNode(Bypass)->getIDom()) && 7822 "TC check is expected to dominate Bypass"); 7823 7824 // Update dominator for Bypass & LoopExit. 7825 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7826 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7827 // For loops with multiple exits, there's no edge from the middle block 7828 // to exit blocks (as the epilogue must run) and thus no need to update 7829 // the immediate dominator of the exit blocks. 7830 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7831 7832 LoopBypassBlocks.push_back(TCCheckBlock); 7833 7834 // Save the trip count so we don't have to regenerate it in the 7835 // vec.epilog.iter.check. This is safe to do because the trip count 7836 // generated here dominates the vector epilog iter check. 7837 EPI.TripCount = Count; 7838 } 7839 7840 ReplaceInstWithInst( 7841 TCCheckBlock->getTerminator(), 7842 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7843 7844 return TCCheckBlock; 7845 } 7846 7847 //===--------------------------------------------------------------------===// 7848 // EpilogueVectorizerEpilogueLoop 7849 //===--------------------------------------------------------------------===// 7850 7851 /// This function is partially responsible for generating the control flow 7852 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7853 std::pair<BasicBlock *, Value *> 7854 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7855 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7856 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 7857 7858 // Now, compare the remaining count and if there aren't enough iterations to 7859 // execute the vectorized epilogue skip to the scalar part. 7860 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7861 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7862 LoopVectorPreHeader = 7863 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7864 LI, nullptr, "vec.epilog.ph"); 7865 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7866 VecEpilogueIterationCountCheck); 7867 7868 // Adjust the control flow taking the state info from the main loop 7869 // vectorization into account. 
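  // In rough terms (the precise CFG is depicted at
  // https://llvm.org/docs/Vectorizers.html#epilogue-vectorization): bypass
  // edges that previously targeted this block must be redirected, either to
  // the new epilogue preheader (when coming from the main loop's iteration
  // count check) or to the scalar preheader (when coming from the other
  // safety checks), which is what the code below does.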
7870 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7871 "expected this to be saved from the previous pass."); 7872 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7873 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7874 7875 DT->changeImmediateDominator(LoopVectorPreHeader, 7876 EPI.MainLoopIterationCountCheck); 7877 7878 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7879 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7880 7881 if (EPI.SCEVSafetyCheck) 7882 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7883 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7884 if (EPI.MemSafetyCheck) 7885 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7886 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7887 7888 DT->changeImmediateDominator( 7889 VecEpilogueIterationCountCheck, 7890 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7891 7892 DT->changeImmediateDominator(LoopScalarPreHeader, 7893 EPI.EpilogueIterationCountCheck); 7894 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7895 // If there is an epilogue which must run, there's no edge from the 7896 // middle block to exit blocks and thus no need to update the immediate 7897 // dominator of the exit blocks. 7898 DT->changeImmediateDominator(LoopExitBlock, 7899 EPI.EpilogueIterationCountCheck); 7900 7901 // Keep track of bypass blocks, as they feed start values to the induction 7902 // phis in the scalar loop preheader. 7903 if (EPI.SCEVSafetyCheck) 7904 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7905 if (EPI.MemSafetyCheck) 7906 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7907 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7908 7909 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7910 // merge control-flow from the latch block and the middle block. Update the 7911 // incoming values here and move the Phi into the preheader. 7912 SmallVector<PHINode *, 4> PhisInBlock; 7913 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis()) 7914 PhisInBlock.push_back(&Phi); 7915 7916 for (PHINode *Phi : PhisInBlock) { 7917 Phi->replaceIncomingBlockWith( 7918 VecEpilogueIterationCountCheck->getSinglePredecessor(), 7919 VecEpilogueIterationCountCheck); 7920 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); 7921 if (EPI.SCEVSafetyCheck) 7922 Phi->removeIncomingValue(EPI.SCEVSafetyCheck); 7923 if (EPI.MemSafetyCheck) 7924 Phi->removeIncomingValue(EPI.MemSafetyCheck); 7925 Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI()); 7926 } 7927 7928 // Generate a resume induction for the vector epilogue and put it in the 7929 // vector epilogue preheader 7930 Type *IdxTy = Legal->getWidestInductionType(); 7931 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 7932 LoopVectorPreHeader->getFirstNonPHI()); 7933 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 7934 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 7935 EPI.MainLoopIterationCountCheck); 7936 7937 // Generate the induction variable. 7938 createHeaderBranch(Lp); 7939 7940 // Generate induction resume values. These variables save the new starting 7941 // indexes for the scalar loop. They are used to test if there are any tail 7942 // iterations left once the vector loop has completed. 
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the
  // AdditionalBypass argument.
  createInductionResumeValues({VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
      ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters =
      Builder.CreateICmp(P, Count,
                         createStepForVF(Builder, Count->getType(),
                                         EPI.EpilogueVF, EPI.EpilogueUF),
                         "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
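/// For example (an illustrative split only): with MinVF = 2 and MaxVF = 16,
/// the loop below might build one VPlan covering {2, 4} and a second one
/// covering {8, 16}, if some vectorization decision changes at VF = 8.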
8020 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8021 ElementCount MaxVF) { 8022 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8023 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8024 VFRange SubRange = {VF, MaxVFPlusOne}; 8025 VPlans.push_back(buildVPlan(SubRange)); 8026 VF = SubRange.End; 8027 } 8028 } 8029 8030 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8031 VPlanPtr &Plan) { 8032 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8033 8034 // Look for cached value. 8035 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8036 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8037 if (ECEntryIt != EdgeMaskCache.end()) 8038 return ECEntryIt->second; 8039 8040 VPValue *SrcMask = createBlockInMask(Src, Plan); 8041 8042 // The terminator has to be a branch inst! 8043 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8044 assert(BI && "Unexpected terminator found"); 8045 8046 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8047 return EdgeMaskCache[Edge] = SrcMask; 8048 8049 // If source is an exiting block, we know the exit edge is dynamically dead 8050 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8051 // adding uses of an otherwise potentially dead instruction. 8052 if (OrigLoop->isLoopExiting(Src)) 8053 return EdgeMaskCache[Edge] = SrcMask; 8054 8055 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8056 assert(EdgeMask && "No Edge Mask found for condition"); 8057 8058 if (BI->getSuccessor(0) != Dst) 8059 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8060 8061 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8062 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8063 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8064 // The select version does not introduce new UB if SrcMask is false and 8065 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8066 VPValue *False = Plan->getOrAddVPValue( 8067 ConstantInt::getFalse(BI->getCondition()->getType())); 8068 EdgeMask = 8069 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8070 } 8071 8072 return EdgeMaskCache[Edge] = EdgeMask; 8073 } 8074 8075 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8076 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8077 8078 // Look for cached value. 8079 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8080 if (BCEntryIt != BlockMaskCache.end()) 8081 return BCEntryIt->second; 8082 8083 // All-one mask is modelled as no-mask following the convention for masked 8084 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8085 VPValue *BlockMask = nullptr; 8086 8087 if (OrigLoop->getHeader() == BB) { 8088 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8089 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8090 8091 // Introduce the early-exit compare IV <= BTC to form header block mask. 8092 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8093 // constructing the desired canonical IV in the header block as its first 8094 // non-phi instructions. 
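    // Illustrative example (hypothetical values): with a trip count of 10 and
    // VF = 4, BTC = 9; the third vector iteration compares the lane values
    // {8, 9, 10, 11} against 9, yielding the mask {1, 1, 0, 0} that disables
    // the two excess lanes.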
8095 assert(CM.foldTailByMasking() && "must fold the tail"); 8096 VPBasicBlock *HeaderVPBB = 8097 Plan->getVectorLoopRegion()->getEntryBasicBlock(); 8098 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8099 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8100 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8101 8102 VPBuilder::InsertPointGuard Guard(Builder); 8103 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8104 if (CM.TTI.emitGetActiveLaneMask()) { 8105 VPValue *TC = Plan->getOrCreateTripCount(); 8106 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8107 } else { 8108 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8109 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8110 } 8111 return BlockMaskCache[BB] = BlockMask; 8112 } 8113 8114 // This is the block mask. We OR all incoming edges. 8115 for (auto *Predecessor : predecessors(BB)) { 8116 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8117 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8118 return BlockMaskCache[BB] = EdgeMask; 8119 8120 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8121 BlockMask = EdgeMask; 8122 continue; 8123 } 8124 8125 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8126 } 8127 8128 return BlockMaskCache[BB] = BlockMask; 8129 } 8130 8131 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8132 ArrayRef<VPValue *> Operands, 8133 VFRange &Range, 8134 VPlanPtr &Plan) { 8135 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8136 "Must be called with either a load or store"); 8137 8138 auto willWiden = [&](ElementCount VF) -> bool { 8139 if (VF.isScalar()) 8140 return false; 8141 LoopVectorizationCostModel::InstWidening Decision = 8142 CM.getWideningDecision(I, VF); 8143 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8144 "CM decision should be taken at this point."); 8145 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8146 return true; 8147 if (CM.isScalarAfterVectorization(I, VF) || 8148 CM.isProfitableToScalarize(I, VF)) 8149 return false; 8150 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8151 }; 8152 8153 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8154 return nullptr; 8155 8156 VPValue *Mask = nullptr; 8157 if (Legal->isMaskRequired(I)) 8158 Mask = createBlockInMask(I->getParent(), Plan); 8159 8160 // Determine if the pointer operand of the access is either consecutive or 8161 // reverse consecutive. 
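  // Illustrative example (hypothetical accesses): a load of A[i] is
  // consecutive (CM_Widen), while a load of A[N - i] is reverse consecutive
  // (CM_Widen_Reverse); both can be emitted as a single wide load, the latter
  // followed by a reverse shuffle.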
8162 LoopVectorizationCostModel::InstWidening Decision = 8163 CM.getWideningDecision(I, Range.Start); 8164 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8165 bool Consecutive = 8166 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8167 8168 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8169 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8170 Consecutive, Reverse); 8171 8172 StoreInst *Store = cast<StoreInst>(I); 8173 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8174 Mask, Consecutive, Reverse); 8175 } 8176 8177 static VPWidenIntOrFpInductionRecipe * 8178 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8179 VPValue *Start, const InductionDescriptor &IndDesc, 8180 LoopVectorizationCostModel &CM, ScalarEvolution &SE, 8181 Loop &OrigLoop, VFRange &Range) { 8182 // Returns true if an instruction \p I should be scalarized instead of 8183 // vectorized for the chosen vectorization factor. 8184 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8185 return CM.isScalarAfterVectorization(I, VF) || 8186 CM.isProfitableToScalarize(I, VF); 8187 }; 8188 8189 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8190 [&](ElementCount VF) { 8191 // Returns true if we should generate a scalar version of \p IV. 8192 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8193 return true; 8194 auto isScalarInst = [&](User *U) -> bool { 8195 auto *I = cast<Instruction>(U); 8196 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8197 }; 8198 return any_of(PhiOrTrunc->users(), isScalarInst); 8199 }, 8200 Range); 8201 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8202 [&](ElementCount VF) { 8203 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8204 }, 8205 Range); 8206 assert(IndDesc.getStartValue() == 8207 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8208 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8209 "step must be loop invariant"); 8210 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8211 return new VPWidenIntOrFpInductionRecipe( 8212 Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE); 8213 } 8214 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8215 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8216 !NeedsScalarIVOnly, SE); 8217 } 8218 8219 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8220 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8221 8222 // Check if this is an integer or fp induction. If so, build the recipe that 8223 // produces its scalar and vector values. 8224 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8225 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, 8226 *PSE.getSE(), *OrigLoop, Range); 8227 8228 // Check if this is pointer induction. If so, build the recipe for it. 8229 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8230 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8231 *PSE.getSE()); 8232 return nullptr; 8233 } 8234 8235 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8236 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8237 VPlan &Plan) const { 8238 // Optimize the special case where the source is a constant integer 8239 // induction variable. 
Notice that we can only optimize the 'trunc' case 8240 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8241 // (c) other casts depend on pointer size. 8242 8243 // Determine whether \p K is a truncation based on an induction variable that 8244 // can be optimized. 8245 auto isOptimizableIVTruncate = 8246 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8247 return [=](ElementCount VF) -> bool { 8248 return CM.isOptimizableIVTruncate(K, VF); 8249 }; 8250 }; 8251 8252 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8253 isOptimizableIVTruncate(I), Range)) { 8254 8255 auto *Phi = cast<PHINode>(I->getOperand(0)); 8256 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8257 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8258 return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(), 8259 *OrigLoop, Range); 8260 } 8261 return nullptr; 8262 } 8263 8264 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8265 ArrayRef<VPValue *> Operands, 8266 VPlanPtr &Plan) { 8267 // If all incoming values are equal, the incoming VPValue can be used directly 8268 // instead of creating a new VPBlendRecipe. 8269 VPValue *FirstIncoming = Operands[0]; 8270 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8271 return FirstIncoming == Inc; 8272 })) { 8273 return Operands[0]; 8274 } 8275 8276 unsigned NumIncoming = Phi->getNumIncomingValues(); 8277 // For in-loop reductions, we do not need to create an additional select. 8278 VPValue *InLoopVal = nullptr; 8279 for (unsigned In = 0; In < NumIncoming; In++) { 8280 PHINode *PhiOp = 8281 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8282 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8283 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8284 InLoopVal = Operands[In]; 8285 } 8286 } 8287 8288 assert((!InLoopVal || NumIncoming == 2) && 8289 "Found an in-loop reduction for PHI with unexpected number of " 8290 "incoming values"); 8291 if (InLoopVal) 8292 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8293 8294 // We know that all PHIs in non-header blocks are converted into selects, so 8295 // we don't have to worry about the insertion order and we can just use the 8296 // builder. At this point we generate the predication tree. There may be 8297 // duplications since this is a simple recursive scan, but future 8298 // optimizations will clean it up. 
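  // For example, for a phi with incoming values %a and %b, the operand list
  // built below is {%a, mask-of-%a's-edge, %b, mask-of-%b's-edge}: each
  // incoming value is immediately followed by the mask of its incoming edge
  // (omitted when that mask is all-one).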
8299 SmallVector<VPValue *, 2> OperandsWithMask; 8300 8301 for (unsigned In = 0; In < NumIncoming; In++) { 8302 VPValue *EdgeMask = 8303 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8304 assert((EdgeMask || NumIncoming == 1) && 8305 "Multiple predecessors with one having a full mask"); 8306 OperandsWithMask.push_back(Operands[In]); 8307 if (EdgeMask) 8308 OperandsWithMask.push_back(EdgeMask); 8309 } 8310 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8311 } 8312 8313 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8314 ArrayRef<VPValue *> Operands, 8315 VFRange &Range) const { 8316 8317 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8318 [this, CI](ElementCount VF) { 8319 return CM.isScalarWithPredication(CI, VF); 8320 }, 8321 Range); 8322 8323 if (IsPredicated) 8324 return nullptr; 8325 8326 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8327 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8328 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8329 ID == Intrinsic::pseudoprobe || 8330 ID == Intrinsic::experimental_noalias_scope_decl)) 8331 return nullptr; 8332 8333 auto willWiden = [&](ElementCount VF) -> bool { 8334 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8335 // The following case may be scalarized depending on the VF. 8336 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8337 // version of the instruction. 8338 // Is it beneficial to perform intrinsic call compared to lib call? 8339 bool NeedToScalarize = false; 8340 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8341 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8342 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8343 return UseVectorIntrinsic || !NeedToScalarize; 8344 }; 8345 8346 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8347 return nullptr; 8348 8349 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8350 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8351 } 8352 8353 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8354 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8355 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8356 // Instruction should be widened, unless it is scalar after vectorization, 8357 // scalarization is profitable or it is predicated. 
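  // shouldWiden therefore answers true only when, over the clamped VF range,
  // the instruction neither stays scalar after vectorization, nor is more
  // profitable to scalarize, nor needs predication.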
8358 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8359 return CM.isScalarAfterVectorization(I, VF) || 8360 CM.isProfitableToScalarize(I, VF) || 8361 CM.isScalarWithPredication(I, VF); 8362 }; 8363 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8364 Range); 8365 } 8366 8367 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8368 ArrayRef<VPValue *> Operands) const { 8369 auto IsVectorizableOpcode = [](unsigned Opcode) { 8370 switch (Opcode) { 8371 case Instruction::Add: 8372 case Instruction::And: 8373 case Instruction::AShr: 8374 case Instruction::BitCast: 8375 case Instruction::FAdd: 8376 case Instruction::FCmp: 8377 case Instruction::FDiv: 8378 case Instruction::FMul: 8379 case Instruction::FNeg: 8380 case Instruction::FPExt: 8381 case Instruction::FPToSI: 8382 case Instruction::FPToUI: 8383 case Instruction::FPTrunc: 8384 case Instruction::FRem: 8385 case Instruction::FSub: 8386 case Instruction::ICmp: 8387 case Instruction::IntToPtr: 8388 case Instruction::LShr: 8389 case Instruction::Mul: 8390 case Instruction::Or: 8391 case Instruction::PtrToInt: 8392 case Instruction::SDiv: 8393 case Instruction::Select: 8394 case Instruction::SExt: 8395 case Instruction::Shl: 8396 case Instruction::SIToFP: 8397 case Instruction::SRem: 8398 case Instruction::Sub: 8399 case Instruction::Trunc: 8400 case Instruction::UDiv: 8401 case Instruction::UIToFP: 8402 case Instruction::URem: 8403 case Instruction::Xor: 8404 case Instruction::ZExt: 8405 return true; 8406 } 8407 return false; 8408 }; 8409 8410 if (!IsVectorizableOpcode(I->getOpcode())) 8411 return nullptr; 8412 8413 // Success: widen this instruction. 8414 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8415 } 8416 8417 void VPRecipeBuilder::fixHeaderPhis() { 8418 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8419 for (VPHeaderPHIRecipe *R : PhisToFix) { 8420 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8421 VPRecipeBase *IncR = 8422 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8423 R->addOperand(IncR->getVPSingleValue()); 8424 } 8425 } 8426 8427 VPBasicBlock *VPRecipeBuilder::handleReplication( 8428 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8429 VPlanPtr &Plan) { 8430 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8431 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8432 Range); 8433 8434 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8435 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8436 Range); 8437 8438 // Even if the instruction is not marked as uniform, there are certain 8439 // intrinsic calls that can be effectively treated as such, so we check for 8440 // them here. Conservatively, we only do this for scalable vectors, since 8441 // for fixed-width VFs we can always fall back on full scalarization. 8442 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8443 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8444 case Intrinsic::assume: 8445 case Intrinsic::lifetime_start: 8446 case Intrinsic::lifetime_end: 8447 // For scalable vectors if one of the operands is variant then we still 8448 // want to mark as uniform, which will generate one instruction for just 8449 // the first lane of the vector. We can't scalarize the call in the same 8450 // way as for fixed-width vectors because we don't know how many lanes 8451 // there are. 
      //
      // The reasons for doing it this way for scalable vectors are:
      //  1. For the assume intrinsic generating the instruction for the first
      //     lane is still better than not generating any at all. For
      //     example, the input may be a splat across all lanes.
      //  2. For the lifetime start/end intrinsics the pointer operand only
      //     does anything useful when the input comes from a stack object,
      //     which suggests it should always be uniform. For non-stack objects
      //     the effect is to poison the object, which still allows us to
      //     remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Check if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");

  VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
  assert(SingleSucc && "VPBB must have a single successor when handling "
                       "predicated replication.");
  VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ?
nullptr 8525 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8526 if (PHIRecipe) { 8527 Plan->removeVPValueFor(Instr); 8528 Plan->addVPValue(Instr, PHIRecipe); 8529 } 8530 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8531 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8532 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8533 8534 // Note: first set Entry as region entry and then connect successors starting 8535 // from it in order, to propagate the "parent" of each VPBasicBlock. 8536 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8537 VPBlockUtils::connectBlocks(Pred, Exit); 8538 8539 return Region; 8540 } 8541 8542 VPRecipeOrVPValueTy 8543 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8544 ArrayRef<VPValue *> Operands, 8545 VFRange &Range, VPlanPtr &Plan) { 8546 // First, check for specific widening recipes that deal with calls, memory 8547 // operations, inductions and Phi nodes. 8548 if (auto *CI = dyn_cast<CallInst>(Instr)) 8549 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8550 8551 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8552 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8553 8554 VPRecipeBase *Recipe; 8555 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8556 if (Phi->getParent() != OrigLoop->getHeader()) 8557 return tryToBlend(Phi, Operands, Plan); 8558 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8559 return toVPRecipeResult(Recipe); 8560 8561 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8562 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8563 VPValue *StartV = Operands[0]; 8564 if (Legal->isReductionVariable(Phi)) { 8565 const RecurrenceDescriptor &RdxDesc = 8566 Legal->getReductionVars().find(Phi)->second; 8567 assert(RdxDesc.getRecurrenceStartValue() == 8568 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8569 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8570 CM.isInLoopReduction(Phi), 8571 CM.useOrderedReductions(RdxDesc)); 8572 } else { 8573 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8574 } 8575 8576 // Record the incoming value from the backedge, so we can add the incoming 8577 // value from the backedge after all recipes have been created. 8578 recordRecipeOf(cast<Instruction>( 8579 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8580 PhisToFix.push_back(PhiRecipe); 8581 } else { 8582 // TODO: record backedge value for remaining pointer induction phis. 
      assert(Phi->getType()->isPointerTy() &&
             "only pointer phis should be handled here");
      assert(Legal->getInductionVars().count(Phi) &&
             "Not an induction variable");
      InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
      VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
      PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
    }

    return toVPRecipeResult(PhiRecipe);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
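  // For example, if a recorded sink target is itself one of the trivially
  // dead induction updates, the loop below walks backwards from it to the
  // closest preceding live instruction and uses that as the new sink target.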
8644 for (auto &P : Legal->getSinkAfter()) { 8645 Instruction *SinkTarget = P.second; 8646 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8647 (void)FirstInst; 8648 while (DeadInstructions.contains(SinkTarget)) { 8649 assert( 8650 SinkTarget != FirstInst && 8651 "Must find a live instruction (at least the one feeding the " 8652 "first-order recurrence PHI) before reaching beginning of the block"); 8653 SinkTarget = SinkTarget->getPrevNode(); 8654 assert(SinkTarget != P.first && 8655 "sink source equals target, no sinking required"); 8656 } 8657 P.second = SinkTarget; 8658 } 8659 8660 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8661 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8662 VFRange SubRange = {VF, MaxVFPlusOne}; 8663 VPlans.push_back( 8664 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8665 VF = SubRange.End; 8666 } 8667 } 8668 8669 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a 8670 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a 8671 // BranchOnCount VPInstruction to the latch. 8672 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8673 bool HasNUW, bool IsVPlanNative) { 8674 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8675 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8676 8677 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8678 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8679 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8680 if (IsVPlanNative) 8681 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 8682 Header->insert(CanonicalIVPHI, Header->begin()); 8683 8684 auto *CanonicalIVIncrement = 8685 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW 8686 : VPInstruction::CanonicalIVIncrement, 8687 {CanonicalIVPHI}, DL); 8688 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8689 8690 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8691 if (IsVPlanNative) { 8692 EB = cast<VPBasicBlock>(EB->getSinglePredecessor()); 8693 EB->setCondBit(nullptr); 8694 } 8695 EB->appendRecipe(CanonicalIVIncrement); 8696 8697 auto *BranchOnCount = 8698 new VPInstruction(VPInstruction::BranchOnCount, 8699 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8700 EB->appendRecipe(BranchOnCount); 8701 } 8702 8703 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8704 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8705 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8706 8707 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8708 8709 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8710 8711 // --------------------------------------------------------------------------- 8712 // Pre-construction: record ingredients whose recipes we'll need to further 8713 // process after constructing the initial VPlan. 8714 // --------------------------------------------------------------------------- 8715 8716 // Mark instructions we'll need to sink later and their targets as 8717 // ingredients whose recipe we'll need to record. 
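  // Both the sink source (Entry.first) and the sink target (Entry.second) are
  // recorded, so their recipes can be looked up again when applying the
  // sink-after constraints further down.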
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind =
        Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and
  // add placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create initial VPlan skeleton, with separate header and latch blocks.
  VPBasicBlock *HeaderVPBB = new VPBasicBlock();
  VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
  auto Plan = std::make_unique<VPlan>(TopRegion);

  Instruction *DLInst =
      getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
                        DLInst ? DLInst->getDebugLoc() : DebugLoc(),
                        !CM.foldTailByMasking(), false);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  VPBasicBlock *VPBB = HeaderVPBB;
  SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    VPBB->setName(BB->getName());
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
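    // Sketch of the per-instruction handling below: branches and dead
    // instructions are skipped, header phis get their start value as the only
    // operand, and every other instruction is either widened via
    // tryToCreateWidenRecipe or falls back to replication via
    // handleReplication.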
8792 for (Instruction &I : BB->instructionsWithoutDebug()) { 8793 Instruction *Instr = &I; 8794 8795 // First filter out irrelevant instructions, to ensure no recipes are 8796 // built for them. 8797 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8798 continue; 8799 8800 SmallVector<VPValue *, 4> Operands; 8801 auto *Phi = dyn_cast<PHINode>(Instr); 8802 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8803 Operands.push_back(Plan->getOrAddVPValue( 8804 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8805 } else { 8806 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8807 Operands = {OpRange.begin(), OpRange.end()}; 8808 } 8809 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8810 Instr, Operands, Range, Plan)) { 8811 // If Instr can be simplified to an existing VPValue, use it. 8812 if (RecipeOrValue.is<VPValue *>()) { 8813 auto *VPV = RecipeOrValue.get<VPValue *>(); 8814 Plan->addVPValue(Instr, VPV); 8815 // If the re-used value is a recipe, register the recipe for the 8816 // instruction, in case the recipe for Instr needs to be recorded. 8817 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8818 RecipeBuilder.setRecipe(Instr, R); 8819 continue; 8820 } 8821 // Otherwise, add the new recipe. 8822 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8823 for (auto *Def : Recipe->definedValues()) { 8824 auto *UV = Def->getUnderlyingValue(); 8825 Plan->addVPValue(UV, Def); 8826 } 8827 8828 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8829 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8830 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8831 // of the header block. That can happen for truncates of induction 8832 // variables. Those recipes are moved to the phi section of the header 8833 // block after applying SinkAfter, which relies on the original 8834 // position of the trunc. 8835 assert(isa<TruncInst>(Instr)); 8836 InductionsToMove.push_back( 8837 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8838 } 8839 RecipeBuilder.setRecipe(Instr, Recipe); 8840 VPBB->appendRecipe(Recipe); 8841 continue; 8842 } 8843 8844 // Otherwise, if all widening options failed, Instruction is to be 8845 // replicated. This may create a successor for VPBB. 8846 VPBasicBlock *NextVPBB = 8847 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8848 if (NextVPBB != VPBB) { 8849 VPBB = NextVPBB; 8850 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8851 : ""); 8852 } 8853 } 8854 8855 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8856 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8857 } 8858 8859 // Fold the last, empty block into its predecessor. 8860 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8861 assert(VPBB && "expected to fold last (empty) block"); 8862 // After here, VPBB should not be used. 8863 VPBB = nullptr; 8864 8865 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8866 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8867 "entry block must be set to a VPRegionBlock having a non-empty entry " 8868 "VPBasicBlock"); 8869 RecipeBuilder.fixHeaderPhis(); 8870 8871 // --------------------------------------------------------------------------- 8872 // Transform initial VPlan: Apply previously taken decisions, in order, to 8873 // bring the VPlan to its final state. 8874 // --------------------------------------------------------------------------- 8875 8876 // Apply Sink-After legal constraints. 
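  // A replicate region created earlier for a predicated instruction is a
  // single-entry single-exit triangle (using the names from
  // createReplicateRegion): pred.<opcode>.entry branches to pred.<opcode>.if
  // and pred.<opcode>.continue, with the .if block executed only under the
  // block-in mask. This is why sinking into or out of such a region needs the
  // special handling below.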
8877 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8878 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8879 if (Region && Region->isReplicator()) { 8880 assert(Region->getNumSuccessors() == 1 && 8881 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8882 assert(R->getParent()->size() == 1 && 8883 "A recipe in an original replicator region must be the only " 8884 "recipe in its block"); 8885 return Region; 8886 } 8887 return nullptr; 8888 }; 8889 for (auto &Entry : SinkAfter) { 8890 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8891 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8892 8893 auto *TargetRegion = GetReplicateRegion(Target); 8894 auto *SinkRegion = GetReplicateRegion(Sink); 8895 if (!SinkRegion) { 8896 // If the sink source is not a replicate region, sink the recipe directly. 8897 if (TargetRegion) { 8898 // The target is in a replication region, make sure to move Sink to 8899 // the block after it, not into the replication region itself. 8900 VPBasicBlock *NextBlock = 8901 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8902 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8903 } else 8904 Sink->moveAfter(Target); 8905 continue; 8906 } 8907 8908 // The sink source is in a replicate region. Unhook the region from the CFG. 8909 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8910 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8911 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8912 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8913 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8914 8915 if (TargetRegion) { 8916 // The target recipe is also in a replicate region, move the sink region 8917 // after the target region. 8918 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8919 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8920 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8921 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8922 } else { 8923 // The sink source is in a replicate region, we need to move the whole 8924 // replicate region, which should only contain a single recipe in the 8925 // main block. 8926 auto *SplitBlock = 8927 Target->getParent()->splitAt(std::next(Target->getIterator())); 8928 8929 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8930 8931 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8932 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8933 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8934 } 8935 } 8936 8937 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8938 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8939 8940 // Now that sink-after is done, move induction recipes for optimized truncates 8941 // to the phi section of the header block. 8942 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8943 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8944 8945 // Adjust the recipes for any inloop reductions. 8946 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 8947 RecipeBuilder, Range.Start); 8948 8949 // Introduce a recipe to combine the incoming and previous values of a 8950 // first-order recurrence. 
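  // Conceptually, FirstOrderRecurrenceSplice(phi, backedge-value) yields the
  // recurrence value shifted by one lane: the last lane of the previous
  // vector iteration followed by the first VF-1 lanes of the current one
  // (a descriptive sketch; the exact lowering is defined by VPInstruction).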
8951 for (VPRecipeBase &R : 8952 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8953 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8954 if (!RecurPhi) 8955 continue; 8956 8957 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8958 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8959 auto *Region = GetReplicateRegion(PrevRecipe); 8960 if (Region) 8961 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 8962 if (Region || PrevRecipe->isPhi()) 8963 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8964 else 8965 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8966 8967 auto *RecurSplice = cast<VPInstruction>( 8968 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8969 {RecurPhi, RecurPhi->getBackedgeValue()})); 8970 8971 RecurPhi->replaceAllUsesWith(RecurSplice); 8972 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8973 // all users. 8974 RecurSplice->setOperand(0, RecurPhi); 8975 } 8976 8977 // Interleave memory: for each Interleave Group we marked earlier as relevant 8978 // for this VPlan, replace the Recipes widening its memory instructions with a 8979 // single VPInterleaveRecipe at its insertion point. 8980 for (auto IG : InterleaveGroups) { 8981 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8982 RecipeBuilder.getRecipe(IG->getInsertPos())); 8983 SmallVector<VPValue *, 4> StoredValues; 8984 for (unsigned i = 0; i < IG->getFactor(); ++i) 8985 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 8986 auto *StoreR = 8987 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 8988 StoredValues.push_back(StoreR->getStoredValue()); 8989 } 8990 8991 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8992 Recipe->getMask()); 8993 VPIG->insertBefore(Recipe); 8994 unsigned J = 0; 8995 for (unsigned i = 0; i < IG->getFactor(); ++i) 8996 if (Instruction *Member = IG->getMember(i)) { 8997 if (!Member->getType()->isVoidTy()) { 8998 VPValue *OriginalV = Plan->getVPValue(Member); 8999 Plan->removeVPValueFor(Member); 9000 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9001 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9002 J++; 9003 } 9004 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9005 } 9006 } 9007 9008 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9009 // in ways that accessing values using original IR values is incorrect. 9010 Plan->disableValue2VPValue(); 9011 9012 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9013 VPlanTransforms::sinkScalarOperands(*Plan); 9014 VPlanTransforms::mergeReplicateRegions(*Plan); 9015 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9016 9017 std::string PlanName; 9018 raw_string_ostream RSO(PlanName); 9019 ElementCount VF = Range.Start; 9020 Plan->addVF(VF); 9021 RSO << "Initial VPlan for VF={" << VF; 9022 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9023 Plan->addVF(VF); 9024 RSO << "," << VF; 9025 } 9026 RSO << "},UF>=1"; 9027 RSO.flush(); 9028 Plan->setName(PlanName); 9029 9030 // Fold Exit block into its predecessor if possible. 9031 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9032 // VPBasicBlock as exit. 
  VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());

  assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running the transformation to recipes until masked code
    // generation in the VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan,
      [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
      DeadInstructions, *PSE.getSE());

  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
                        true, true);
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc =
        Legal->getReductionVars().find(Phi)->second;
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For min/max reductions the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      // Recognize a call to the llvm.fmuladd intrinsic.
9109 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9110 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9111 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9112 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9113 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9114 "Expected to replace a VPWidenSelectSC"); 9115 FirstOpId = 1; 9116 } else { 9117 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9118 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9119 "Expected to replace a VPWidenSC"); 9120 FirstOpId = 0; 9121 } 9122 unsigned VecOpId = 9123 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9124 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9125 9126 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent()) 9127 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9128 : nullptr; 9129 9130 if (IsFMulAdd) { 9131 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9132 // need to create an fmul recipe to use as the vector operand for the 9133 // fadd reduction. 9134 VPInstruction *FMulRecipe = new VPInstruction( 9135 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9136 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9137 WidenRecipe->getParent()->insert(FMulRecipe, 9138 WidenRecipe->getIterator()); 9139 VecOp = FMulRecipe; 9140 } 9141 VPReductionRecipe *RedRecipe = 9142 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9143 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9144 Plan->removeVPValueFor(R); 9145 Plan->addVPValue(R, RedRecipe); 9146 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9147 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9148 WidenRecipe->eraseFromParent(); 9149 9150 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9151 VPRecipeBase *CompareRecipe = 9152 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9153 assert(isa<VPWidenRecipe>(CompareRecipe) && 9154 "Expected to replace a VPWidenSC"); 9155 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9156 "Expected no remaining users"); 9157 CompareRecipe->eraseFromParent(); 9158 } 9159 Chain = R; 9160 } 9161 } 9162 9163 // If tail is folded by masking, introduce selects between the phi 9164 // and the live-out instruction of each reduction, at the beginning of the 9165 // dedicated latch block. 
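  // Each select introduced here conceptually computes
  //   select(header-block-mask, updated-reduction-value, reduction-phi)
  // so that lanes disabled by tail folding keep the value carried by the phi.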
9166 if (CM.foldTailByMasking()) { 9167 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9168 for (VPRecipeBase &R : 9169 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9170 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9171 if (!PhiR || PhiR->isInLoop()) 9172 continue; 9173 VPValue *Cond = 9174 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9175 VPValue *Red = PhiR->getBackedgeValue(); 9176 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9177 "reduction recipe must be defined before latch"); 9178 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9179 } 9180 } 9181 } 9182 9183 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9184 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9185 VPSlotTracker &SlotTracker) const { 9186 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9187 IG->getInsertPos()->printAsOperand(O, false); 9188 O << ", "; 9189 getAddr()->printAsOperand(O, SlotTracker); 9190 VPValue *Mask = getMask(); 9191 if (Mask) { 9192 O << ", "; 9193 Mask->printAsOperand(O, SlotTracker); 9194 } 9195 9196 unsigned OpIdx = 0; 9197 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9198 if (!IG->getMember(i)) 9199 continue; 9200 if (getNumStoreOperands() > 0) { 9201 O << "\n" << Indent << " store "; 9202 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9203 O << " to index " << i; 9204 } else { 9205 O << "\n" << Indent << " "; 9206 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9207 O << " = load from index " << i; 9208 } 9209 ++OpIdx; 9210 } 9211 } 9212 #endif 9213 9214 void VPWidenCallRecipe::execute(VPTransformState &State) { 9215 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9216 *this, State); 9217 } 9218 9219 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9220 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9221 State.ILV->setDebugLocFromInst(&I); 9222 9223 // The condition can be loop invariant but still defined inside the 9224 // loop. This means that we can't just use the original 'cond' value. 9225 // We have to take the 'vectorized' value and pick the first lane. 9226 // Instcombine will make this a no-op. 9227 auto *InvarCond = 9228 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9229 9230 for (unsigned Part = 0; Part < State.UF; ++Part) { 9231 Value *Cond = InvarCond ? 
InvarCond : State.get(getOperand(0), Part); 9232 Value *Op0 = State.get(getOperand(1), Part); 9233 Value *Op1 = State.get(getOperand(2), Part); 9234 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9235 State.set(this, Sel, Part); 9236 State.ILV->addMetadata(Sel, &I); 9237 } 9238 } 9239 9240 void VPWidenRecipe::execute(VPTransformState &State) { 9241 auto &I = *cast<Instruction>(getUnderlyingValue()); 9242 auto &Builder = State.Builder; 9243 switch (I.getOpcode()) { 9244 case Instruction::Call: 9245 case Instruction::Br: 9246 case Instruction::PHI: 9247 case Instruction::GetElementPtr: 9248 case Instruction::Select: 9249 llvm_unreachable("This instruction is handled by a different recipe."); 9250 case Instruction::UDiv: 9251 case Instruction::SDiv: 9252 case Instruction::SRem: 9253 case Instruction::URem: 9254 case Instruction::Add: 9255 case Instruction::FAdd: 9256 case Instruction::Sub: 9257 case Instruction::FSub: 9258 case Instruction::FNeg: 9259 case Instruction::Mul: 9260 case Instruction::FMul: 9261 case Instruction::FDiv: 9262 case Instruction::FRem: 9263 case Instruction::Shl: 9264 case Instruction::LShr: 9265 case Instruction::AShr: 9266 case Instruction::And: 9267 case Instruction::Or: 9268 case Instruction::Xor: { 9269 // Just widen unops and binops. 9270 State.ILV->setDebugLocFromInst(&I); 9271 9272 for (unsigned Part = 0; Part < State.UF; ++Part) { 9273 SmallVector<Value *, 2> Ops; 9274 for (VPValue *VPOp : operands()) 9275 Ops.push_back(State.get(VPOp, Part)); 9276 9277 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9278 9279 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9280 VecOp->copyIRFlags(&I); 9281 9282 // If the instruction is vectorized and was in a basic block that needed 9283 // predication, we can't propagate poison-generating flags (nuw/nsw, 9284 // exact, etc.). The control flow has been linearized and the 9285 // instruction is no longer guarded by the predicate, which could make 9286 // the flag properties to no longer hold. 9287 if (State.MayGeneratePoisonRecipes.contains(this)) 9288 VecOp->dropPoisonGeneratingFlags(); 9289 } 9290 9291 // Use this vector value for all users of the original instruction. 9292 State.set(this, V, Part); 9293 State.ILV->addMetadata(V, &I); 9294 } 9295 9296 break; 9297 } 9298 case Instruction::ICmp: 9299 case Instruction::FCmp: { 9300 // Widen compares. Generate vector compares. 9301 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9302 auto *Cmp = cast<CmpInst>(&I); 9303 State.ILV->setDebugLocFromInst(Cmp); 9304 for (unsigned Part = 0; Part < State.UF; ++Part) { 9305 Value *A = State.get(getOperand(0), Part); 9306 Value *B = State.get(getOperand(1), Part); 9307 Value *C = nullptr; 9308 if (FCmp) { 9309 // Propagate fast math flags. 
9310 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9311 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9312 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9313 } else { 9314 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9315 } 9316 State.set(this, C, Part); 9317 State.ILV->addMetadata(C, &I); 9318 } 9319 9320 break; 9321 } 9322 9323 case Instruction::ZExt: 9324 case Instruction::SExt: 9325 case Instruction::FPToUI: 9326 case Instruction::FPToSI: 9327 case Instruction::FPExt: 9328 case Instruction::PtrToInt: 9329 case Instruction::IntToPtr: 9330 case Instruction::SIToFP: 9331 case Instruction::UIToFP: 9332 case Instruction::Trunc: 9333 case Instruction::FPTrunc: 9334 case Instruction::BitCast: { 9335 auto *CI = cast<CastInst>(&I); 9336 State.ILV->setDebugLocFromInst(CI); 9337 9338 /// Vectorize casts. 9339 Type *DestTy = (State.VF.isScalar()) 9340 ? CI->getType() 9341 : VectorType::get(CI->getType(), State.VF); 9342 9343 for (unsigned Part = 0; Part < State.UF; ++Part) { 9344 Value *A = State.get(getOperand(0), Part); 9345 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9346 State.set(this, Cast, Part); 9347 State.ILV->addMetadata(Cast, &I); 9348 } 9349 break; 9350 } 9351 default: 9352 // This instruction is not vectorized by simple widening. 9353 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9354 llvm_unreachable("Unhandled instruction!"); 9355 } // end of switch. 9356 } 9357 9358 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9359 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9360 // Construct a vector GEP by widening the operands of the scalar GEP as 9361 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9362 // results in a vector of pointers when at least one operand of the GEP 9363 // is vector-typed. Thus, to keep the representation compact, we only use 9364 // vector-typed operands for loop-varying values. 9365 9366 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9367 // If we are vectorizing, but the GEP has only loop-invariant operands, 9368 // the GEP we build (by only using vector-typed operands for 9369 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9370 // produce a vector of pointers, we need to either arbitrarily pick an 9371 // operand to broadcast, or broadcast a clone of the original GEP. 9372 // Here, we broadcast a clone of the original. 9373 // 9374 // TODO: If at some point we decide to scalarize instructions having 9375 // loop-invariant operands, this special case will no longer be 9376 // required. We would add the scalarization decision to 9377 // collectLoopScalars() and teach getVectorValue() to broadcast 9378 // the lane-zero scalar value. 9379 auto *Clone = State.Builder.Insert(GEP->clone()); 9380 for (unsigned Part = 0; Part < State.UF; ++Part) { 9381 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9382 State.set(this, EntryPart, Part); 9383 State.ILV->addMetadata(EntryPart, GEP); 9384 } 9385 } else { 9386 // If the GEP has at least one loop-varying operand, we are sure to 9387 // produce a vector of pointers. But if we are only unrolling, we want 9388 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9389 // produce with the code below will be scalar (if VF == 1) or vector 9390 // (otherwise). Note that for the unroll-only case, we still maintain 9391 // values in the vector mapping with initVector, as we do for other 9392 // instructions. 
9393 for (unsigned Part = 0; Part < State.UF; ++Part) { 9394 // The pointer operand of the new GEP. If it's loop-invariant, we 9395 // won't broadcast it. 9396 auto *Ptr = IsPtrLoopInvariant 9397 ? State.get(getOperand(0), VPIteration(0, 0)) 9398 : State.get(getOperand(0), Part); 9399 9400 // Collect all the indices for the new GEP. If any index is 9401 // loop-invariant, we won't broadcast it. 9402 SmallVector<Value *, 4> Indices; 9403 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9404 VPValue *Operand = getOperand(I); 9405 if (IsIndexLoopInvariant[I - 1]) 9406 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9407 else 9408 Indices.push_back(State.get(Operand, Part)); 9409 } 9410 9411 // If the GEP instruction is vectorized and was in a basic block that 9412 // needed predication, we can't propagate the poison-generating 'inbounds' 9413 // flag. The control flow has been linearized and the GEP is no longer 9414 // guarded by the predicate, which could make the 'inbounds' properties to 9415 // no longer hold. 9416 bool IsInBounds = 9417 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9418 9419 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9420 // but it should be a vector, otherwise. 9421 auto *NewGEP = IsInBounds 9422 ? State.Builder.CreateInBoundsGEP( 9423 GEP->getSourceElementType(), Ptr, Indices) 9424 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9425 Ptr, Indices); 9426 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9427 "NewGEP is not a pointer vector"); 9428 State.set(this, NewGEP, Part); 9429 State.ILV->addMetadata(NewGEP, GEP); 9430 } 9431 } 9432 } 9433 9434 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9435 assert(!State.Instance && "Int or FP induction being replicated."); 9436 9437 Value *Start = getStartValue()->getLiveInIRValue(); 9438 const InductionDescriptor &ID = getInductionDescriptor(); 9439 TruncInst *Trunc = getTruncInst(); 9440 IRBuilderBase &Builder = State.Builder; 9441 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9442 assert(State.VF.isVector() && "must have vector VF"); 9443 9444 // The value from the original loop to which we are mapping the new induction 9445 // variable. 9446 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9447 9448 auto &DL = EntryVal->getModule()->getDataLayout(); 9449 9450 // Generate code for the induction step. Note that induction steps are 9451 // required to be loop-invariant 9452 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 9453 if (SE.isSCEVable(IV->getType())) { 9454 SCEVExpander Exp(SE, DL, "induction"); 9455 return Exp.expandCodeFor(Step, Step->getType(), 9456 State.CFG.VectorPreHeader->getTerminator()); 9457 } 9458 return cast<SCEVUnknown>(Step)->getValue(); 9459 }; 9460 9461 // Fast-math-flags propagate from the original induction instruction. 9462 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9463 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9464 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9465 9466 // Now do the actual transformations, and start with creating the step value. 
9467 Value *Step = CreateStepValue(ID.getStep()); 9468 9469 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9470 "Expected either an induction phi-node or a truncate of it!"); 9471 9472 // Construct the initial value of the vector IV in the vector loop preheader 9473 auto CurrIP = Builder.saveIP(); 9474 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 9475 if (isa<TruncInst>(EntryVal)) { 9476 assert(Start->getType()->isIntegerTy() && 9477 "Truncation requires an integer type"); 9478 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9479 Step = Builder.CreateTrunc(Step, TruncType); 9480 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9481 } 9482 9483 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9484 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9485 Value *SteppedStart = getStepVector( 9486 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9487 9488 // We create vector phi nodes for both integer and floating-point induction 9489 // variables. Here, we determine the kind of arithmetic we will perform. 9490 Instruction::BinaryOps AddOp; 9491 Instruction::BinaryOps MulOp; 9492 if (Step->getType()->isIntegerTy()) { 9493 AddOp = Instruction::Add; 9494 MulOp = Instruction::Mul; 9495 } else { 9496 AddOp = ID.getInductionOpcode(); 9497 MulOp = Instruction::FMul; 9498 } 9499 9500 // Multiply the vectorization factor by the step using integer or 9501 // floating-point arithmetic as appropriate. 9502 Type *StepType = Step->getType(); 9503 Value *RuntimeVF; 9504 if (Step->getType()->isFloatingPointTy()) 9505 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9506 else 9507 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9508 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9509 9510 // Create a vector splat to use in the induction update. 9511 // 9512 // FIXME: If the step is non-constant, we create the vector splat with 9513 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9514 // handle a constant vector splat. 9515 Value *SplatVF = isa<Constant>(Mul) 9516 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9517 : Builder.CreateVectorSplat(State.VF, Mul); 9518 Builder.restoreIP(CurrIP); 9519 9520 // We may need to add the step a number of times, depending on the unroll 9521 // factor. The last of those goes into the PHI. 9522 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9523 &*State.CFG.PrevBB->getFirstInsertionPt()); 9524 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9525 Instruction *LastInduction = VecInd; 9526 for (unsigned Part = 0; Part < State.UF; ++Part) { 9527 State.set(this, LastInduction, Part); 9528 9529 if (isa<TruncInst>(EntryVal)) 9530 State.ILV->addMetadata(LastInduction, EntryVal); 9531 9532 LastInduction = cast<Instruction>( 9533 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9534 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9535 } 9536 9537 // Move the last step to the end of the latch block. This ensures consistent 9538 // placement of all induction updates. 
9539 auto *LoopVectorLatch = 9540 State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 9541 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 9542 LastInduction->moveBefore(Br); 9543 LastInduction->setName("vec.ind.next"); 9544 9545 VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader); 9546 VecInd->addIncoming(LastInduction, LoopVectorLatch); 9547 } 9548 9549 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9550 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9551 "Not a pointer induction according to InductionDescriptor!"); 9552 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9553 "Unexpected type."); 9554 9555 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9556 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9557 9558 if (all_of(users(), [this](const VPUser *U) { 9559 return cast<VPRecipeBase>(U)->usesScalars(this); 9560 })) { 9561 // This is the normalized GEP that starts counting at zero. 9562 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9563 CanonicalIV, IndDesc.getStep()->getType()); 9564 // Determine the number of scalars we need to generate for each unroll 9565 // iteration. If the instruction is uniform, we only need to generate the 9566 // first lane. Otherwise, we generate all VF values. 9567 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9568 assert((IsUniform || !State.VF.isScalable()) && 9569 "Cannot scalarize a scalable VF"); 9570 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9571 9572 for (unsigned Part = 0; Part < State.UF; ++Part) { 9573 Value *PartStart = 9574 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9575 9576 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9577 Value *Idx = State.Builder.CreateAdd( 9578 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9579 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9580 9581 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9582 State.CFG.PrevBB->getTerminator()); 9583 Value *SclrGep = emitTransformedIndex( 9584 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9585 SclrGep->setName("next.gep"); 9586 State.set(this, SclrGep, VPIteration(Part, Lane)); 9587 } 9588 } 9589 return; 9590 } 9591 9592 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9593 "Induction step not a SCEV constant!"); 9594 Type *PhiType = IndDesc.getStep()->getType(); 9595 9596 // Build a pointer phi 9597 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9598 Type *ScStValueType = ScalarStartValue->getType(); 9599 PHINode *NewPointerPhi = 9600 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9601 NewPointerPhi->addIncoming(ScalarStartValue, State.CFG.VectorPreHeader); 9602 9603 // A pointer induction, performed by using a gep 9604 BasicBlock *LoopLatch = 9605 State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 9606 9607 const DataLayout &DL = LoopLatch->getModule()->getDataLayout(); 9608 Instruction *InductionLoc = LoopLatch->getTerminator(); 9609 const SCEV *ScalarStep = IndDesc.getStep(); 9610 SCEVExpander Exp(SE, DL, "induction"); 9611 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9612 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9613 Value *NumUnrolledElems = 9614 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9615 Value *InductionGEP = GetElementPtrInst::Create( 9616 IndDesc.getElementType(), NewPointerPhi, 9617 State.Builder.CreateMul(ScalarStepValue, 
NumUnrolledElems), "ptr.ind", 9618 InductionLoc); 9619 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 9620 9621 // Create UF many actual address geps that use the pointer 9622 // phi as base and a vectorized version of the step value 9623 // (<step*0, ..., step*N>) as offset. 9624 for (unsigned Part = 0; Part < State.UF; ++Part) { 9625 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9626 Value *StartOffsetScalar = 9627 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9628 Value *StartOffset = 9629 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9630 // Create a vector of consecutive numbers from zero to VF. 9631 StartOffset = State.Builder.CreateAdd( 9632 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9633 9634 Value *GEP = State.Builder.CreateGEP( 9635 IndDesc.getElementType(), NewPointerPhi, 9636 State.Builder.CreateMul( 9637 StartOffset, 9638 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9639 "vector.gep")); 9640 State.set(this, GEP, Part); 9641 } 9642 } 9643 9644 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9645 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9646 9647 // Fast-math-flags propagate from the original induction instruction. 9648 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9649 if (IndDesc.getInductionBinOp() && 9650 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9651 State.Builder.setFastMathFlags( 9652 IndDesc.getInductionBinOp()->getFastMathFlags()); 9653 9654 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9655 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9656 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9657 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9658 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9659 ScalarIV = 9660 Ty->isIntegerTy() 9661 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9662 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9663 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9664 getStartValue()->getLiveInIRValue(), Step, 9665 IndDesc); 9666 ScalarIV->setName("offset.idx"); 9667 } 9668 if (TruncToTy) { 9669 assert(Step->getType()->isIntegerTy() && 9670 "Truncation requires an integer step"); 9671 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9672 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9673 } 9674 return ScalarIV; 9675 }; 9676 9677 Value *ScalarIV = CreateScalarIV(Step); 9678 if (State.VF.isVector()) { 9679 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9680 return; 9681 } 9682 9683 for (unsigned Part = 0; Part < State.UF; ++Part) { 9684 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9685 Value *EntryPart; 9686 if (Step->getType()->isFloatingPointTy()) { 9687 Value *StartIdx = 9688 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9689 // Floating-point operations inherit FMF via the builder's flags. 
9690 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9691 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9692 ScalarIV, MulOp); 9693 } else { 9694 Value *StartIdx = 9695 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9696 EntryPart = State.Builder.CreateAdd( 9697 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9698 } 9699 State.set(this, EntryPart, Part); 9700 } 9701 } 9702 9703 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9704 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9705 State); 9706 } 9707 9708 void VPBlendRecipe::execute(VPTransformState &State) { 9709 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9710 // We know that all PHIs in non-header blocks are converted into 9711 // selects, so we don't have to worry about the insertion order and we 9712 // can just use the builder. 9713 // At this point we generate the predication tree. There may be 9714 // duplications since this is a simple recursive scan, but future 9715 // optimizations will clean it up. 9716 9717 unsigned NumIncoming = getNumIncomingValues(); 9718 9719 // Generate a sequence of selects of the form: 9720 // SELECT(Mask3, In3, 9721 // SELECT(Mask2, In2, 9722 // SELECT(Mask1, In1, 9723 // In0))) 9724 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9725 // are essentially undef are taken from In0. 9726 InnerLoopVectorizer::VectorParts Entry(State.UF); 9727 for (unsigned In = 0; In < NumIncoming; ++In) { 9728 for (unsigned Part = 0; Part < State.UF; ++Part) { 9729 // We might have single edge PHIs (blocks) - use an identity 9730 // 'select' for the first PHI operand. 9731 Value *In0 = State.get(getIncomingValue(In), Part); 9732 if (In == 0) 9733 Entry[Part] = In0; // Initialize with the first incoming value. 9734 else { 9735 // Select between the current value and the previous incoming edge 9736 // based on the incoming mask. 9737 Value *Cond = State.get(getMask(In), Part); 9738 Entry[Part] = 9739 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9740 } 9741 } 9742 } 9743 for (unsigned Part = 0; Part < State.UF; ++Part) 9744 State.set(this, Entry[Part], Part); 9745 } 9746 9747 void VPInterleaveRecipe::execute(VPTransformState &State) { 9748 assert(!State.Instance && "Interleave group being replicated."); 9749 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9750 getStoredValues(), getMask()); 9751 } 9752 9753 void VPReductionRecipe::execute(VPTransformState &State) { 9754 assert(!State.Instance && "Reduction being replicated."); 9755 Value *PrevInChain = State.get(getChainOp(), 0); 9756 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9757 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9758 // Propagate the fast-math flags carried by the underlying instruction. 
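// FastMathFlagGuard is an RAII helper: the builder's previous flags are
// restored when this function returns, so the reduction's flags do not leak
// into unrelated recipes.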
9759 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9760 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9761 for (unsigned Part = 0; Part < State.UF; ++Part) {
9762 Value *NewVecOp = State.get(getVecOp(), Part);
9763 if (VPValue *Cond = getCondOp()) {
9764 Value *NewCond = State.get(Cond, Part);
9765 VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9766 Value *Iden = RdxDesc->getRecurrenceIdentity(
9767 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9768 Value *IdenVec =
9769 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9770 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9771 NewVecOp = Select;
9772 }
9773 Value *NewRed;
9774 Value *NextInChain;
9775 if (IsOrdered) {
9776 if (State.VF.isVector())
9777 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9778 PrevInChain);
9779 else
9780 NewRed = State.Builder.CreateBinOp(
9781 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9782 NewVecOp);
9783 PrevInChain = NewRed;
9784 } else {
9785 PrevInChain = State.get(getChainOp(), Part);
9786 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9787 }
9788 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9789 NextInChain =
9790 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9791 NewRed, PrevInChain);
9792 } else if (IsOrdered)
9793 NextInChain = NewRed;
9794 else
9795 NextInChain = State.Builder.CreateBinOp(
9796 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9797 PrevInChain);
9798 State.set(this, NextInChain, Part);
9799 }
9800 }
9801
9802 void VPReplicateRecipe::execute(VPTransformState &State) {
9803 if (State.Instance) { // Generate a single instance.
9804 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9805 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9806 IsPredicated, State);
9807 // Insert the scalar instance, packing it into a vector.
9808 if (AlsoPack && State.VF.isVector()) {
9809 // If we're constructing lane 0, initialize to start from poison.
9810 if (State.Instance->Lane.isFirstLane()) {
9811 assert(!State.VF.isScalable() && "VF is assumed to be non-scalable.");
9812 Value *Poison = PoisonValue::get(
9813 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9814 State.set(this, Poison, State.Instance->Part);
9815 }
9816 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9817 }
9818 return;
9819 }
9820
9821 // Generate scalar instances for all VF lanes of all UF parts, unless the
9822 // instruction is uniform, in which case generate only the first lane for each
9823 // of the UF parts.
9824 unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 9825 assert((!State.VF.isScalable() || IsUniform) && 9826 "Can't scalarize a scalable vector"); 9827 for (unsigned Part = 0; Part < State.UF; ++Part) 9828 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9829 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9830 VPIteration(Part, Lane), IsPredicated, 9831 State); 9832 } 9833 9834 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9835 assert(State.Instance && "Branch on Mask works only on single instance."); 9836 9837 unsigned Part = State.Instance->Part; 9838 unsigned Lane = State.Instance->Lane.getKnownLane(); 9839 9840 Value *ConditionBit = nullptr; 9841 VPValue *BlockInMask = getMask(); 9842 if (BlockInMask) { 9843 ConditionBit = State.get(BlockInMask, Part); 9844 if (ConditionBit->getType()->isVectorTy()) 9845 ConditionBit = State.Builder.CreateExtractElement( 9846 ConditionBit, State.Builder.getInt32(Lane)); 9847 } else // Block in mask is all-one. 9848 ConditionBit = State.Builder.getTrue(); 9849 9850 // Replace the temporary unreachable terminator with a new conditional branch, 9851 // whose two destinations will be set later when they are created. 9852 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9853 assert(isa<UnreachableInst>(CurrentTerminator) && 9854 "Expected to replace unreachable terminator with conditional branch."); 9855 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9856 CondBr->setSuccessor(0, nullptr); 9857 ReplaceInstWithInst(CurrentTerminator, CondBr); 9858 } 9859 9860 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9861 assert(State.Instance && "Predicated instruction PHI works per instance."); 9862 Instruction *ScalarPredInst = 9863 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9864 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9865 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9866 assert(PredicatingBB && "Predicated block has no single predecessor."); 9867 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9868 "operand must be VPReplicateRecipe"); 9869 9870 // By current pack/unpack logic we need to generate only a single phi node: if 9871 // a vector value for the predicated instruction exists at this point it means 9872 // the instruction has vector users only, and a phi for the vector value is 9873 // needed. In this case the recipe of the predicated instruction is marked to 9874 // also do that packing, thereby "hoisting" the insert-element sequence. 9875 // Otherwise, a phi node for the scalar value is needed. 9876 unsigned Part = State.Instance->Part; 9877 if (State.hasVectorValue(getOperand(0), Part)) { 9878 Value *VectorValue = State.get(getOperand(0), Part); 9879 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9880 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9881 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9882 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9883 if (State.hasVectorValue(this, Part)) 9884 State.reset(this, VPhi, Part); 9885 else 9886 State.set(this, VPhi, Part); 9887 // NOTE: Currently we need to update the value of the operand, so the next 9888 // predicated iteration inserts its generated value in the correct vector. 
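// (After this reset, the operand's vector value for this part is the phi
// itself, so the insertelement generated for the next predicated lane chains
// onto the merged vector rather than onto the pre-merge one.)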
9889 State.reset(getOperand(0), VPhi, Part); 9890 } else { 9891 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9892 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9893 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9894 PredicatingBB); 9895 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9896 if (State.hasScalarValue(this, *State.Instance)) 9897 State.reset(this, Phi, *State.Instance); 9898 else 9899 State.set(this, Phi, *State.Instance); 9900 // NOTE: Currently we need to update the value of the operand, so the next 9901 // predicated iteration inserts its generated value in the correct vector. 9902 State.reset(getOperand(0), Phi, *State.Instance); 9903 } 9904 } 9905 9906 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9907 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9908 9909 // Attempt to issue a wide load. 9910 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9911 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9912 9913 assert((LI || SI) && "Invalid Load/Store instruction"); 9914 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9915 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9916 9917 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9918 9919 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9920 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9921 bool CreateGatherScatter = !Consecutive; 9922 9923 auto &Builder = State.Builder; 9924 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9925 bool isMaskRequired = getMask(); 9926 if (isMaskRequired) 9927 for (unsigned Part = 0; Part < State.UF; ++Part) 9928 BlockInMaskParts[Part] = State.get(getMask(), Part); 9929 9930 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9931 // Calculate the pointer for the specific unroll-part. 9932 GetElementPtrInst *PartPtr = nullptr; 9933 9934 bool InBounds = false; 9935 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9936 InBounds = gep->isInBounds(); 9937 if (Reverse) { 9938 // If the address is consecutive but reversed, then the 9939 // wide store needs to start at the last vector element. 9940 // RunTimeVF = VScale * VF.getKnownMinValue() 9941 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9942 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9943 // NumElt = -Part * RunTimeVF 9944 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9945 // LastLane = 1 - RunTimeVF 9946 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9947 PartPtr = 9948 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9949 PartPtr->setIsInBounds(InBounds); 9950 PartPtr = cast<GetElementPtrInst>( 9951 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9952 PartPtr->setIsInBounds(InBounds); 9953 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
9954 BlockInMaskParts[Part] = 9955 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9956 } else { 9957 Value *Increment = 9958 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9959 PartPtr = cast<GetElementPtrInst>( 9960 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9961 PartPtr->setIsInBounds(InBounds); 9962 } 9963 9964 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9965 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9966 }; 9967 9968 // Handle Stores: 9969 if (SI) { 9970 State.ILV->setDebugLocFromInst(SI); 9971 9972 for (unsigned Part = 0; Part < State.UF; ++Part) { 9973 Instruction *NewSI = nullptr; 9974 Value *StoredVal = State.get(StoredValue, Part); 9975 if (CreateGatherScatter) { 9976 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9977 Value *VectorGep = State.get(getAddr(), Part); 9978 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9979 MaskPart); 9980 } else { 9981 if (Reverse) { 9982 // If we store to reverse consecutive memory locations, then we need 9983 // to reverse the order of elements in the stored value. 9984 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 9985 // We don't want to update the value in the map as it might be used in 9986 // another expression. So don't call resetVectorValue(StoredVal). 9987 } 9988 auto *VecPtr = 9989 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 9990 if (isMaskRequired) 9991 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 9992 BlockInMaskParts[Part]); 9993 else 9994 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 9995 } 9996 State.ILV->addMetadata(NewSI, SI); 9997 } 9998 return; 9999 } 10000 10001 // Handle loads. 10002 assert(LI && "Must have a load instruction"); 10003 State.ILV->setDebugLocFromInst(LI); 10004 for (unsigned Part = 0; Part < State.UF; ++Part) { 10005 Value *NewLI; 10006 if (CreateGatherScatter) { 10007 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10008 Value *VectorGep = State.get(getAddr(), Part); 10009 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10010 nullptr, "wide.masked.gather"); 10011 State.ILV->addMetadata(NewLI, LI); 10012 } else { 10013 auto *VecPtr = 10014 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10015 if (isMaskRequired) 10016 NewLI = Builder.CreateMaskedLoad( 10017 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10018 PoisonValue::get(DataTy), "wide.masked.load"); 10019 else 10020 NewLI = 10021 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10022 10023 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10024 State.ILV->addMetadata(NewLI, LI); 10025 if (Reverse) 10026 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10027 } 10028 10029 State.set(this, NewLI, Part); 10030 } 10031 } 10032 10033 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10034 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10035 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10036 // for predication. 
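// For example (illustrative; assuming the option spellings used by this
// pass): a function compiled for size with no forced vectorization hint maps
// to CM_ScalarEpilogueNotAllowedOptSize via rule 1, while
// -prefer-predicate-over-epilogue=predicate-dont-vectorize maps to
// CM_ScalarEpilogueNotAllowedUsePredicate via rule 2, before the loop hints
// or the TTI hook are ever consulted.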
10037 static ScalarEpilogueLowering getScalarEpilogueLowering( 10038 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10039 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10040 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10041 LoopVectorizationLegality &LVL) { 10042 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10043 // don't look at hints or options, and don't request a scalar epilogue. 10044 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10045 // LoopAccessInfo (due to code dependency and not being able to reliably get 10046 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10047 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10048 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10049 // back to the old way and vectorize with versioning when forced. See D81345.) 10050 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10051 PGSOQueryType::IRPass) && 10052 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10053 return CM_ScalarEpilogueNotAllowedOptSize; 10054 10055 // 2) If set, obey the directives 10056 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10057 switch (PreferPredicateOverEpilogue) { 10058 case PreferPredicateTy::ScalarEpilogue: 10059 return CM_ScalarEpilogueAllowed; 10060 case PreferPredicateTy::PredicateElseScalarEpilogue: 10061 return CM_ScalarEpilogueNotNeededUsePredicate; 10062 case PreferPredicateTy::PredicateOrDontVectorize: 10063 return CM_ScalarEpilogueNotAllowedUsePredicate; 10064 }; 10065 } 10066 10067 // 3) If set, obey the hints 10068 switch (Hints.getPredicate()) { 10069 case LoopVectorizeHints::FK_Enabled: 10070 return CM_ScalarEpilogueNotNeededUsePredicate; 10071 case LoopVectorizeHints::FK_Disabled: 10072 return CM_ScalarEpilogueAllowed; 10073 }; 10074 10075 // 4) if the TTI hook indicates this is profitable, request predication. 10076 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10077 LVL.getLAI())) 10078 return CM_ScalarEpilogueNotNeededUsePredicate; 10079 10080 return CM_ScalarEpilogueAllowed; 10081 } 10082 10083 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10084 // If Values have been set for this Def return the one relevant for \p Part. 10085 if (hasVectorValue(Def, Part)) 10086 return Data.PerPartOutput[Def][Part]; 10087 10088 if (!hasScalarValue(Def, {Part, 0})) { 10089 Value *IRV = Def->getLiveInIRValue(); 10090 Value *B = ILV->getBroadcastInstrs(IRV); 10091 set(Def, B, Part); 10092 return B; 10093 } 10094 10095 Value *ScalarValue = get(Def, {Part, 0}); 10096 // If we aren't vectorizing, we can just copy the scalar map values over 10097 // to the vector map. 10098 if (VF.isScalar()) { 10099 set(Def, ScalarValue, Part); 10100 return ScalarValue; 10101 } 10102 10103 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10104 bool IsUniform = RepR && RepR->isUniform(); 10105 10106 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10107 // Check if there is a scalar value for the selected lane. 10108 if (!hasScalarValue(Def, {Part, LastLane})) { 10109 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
10110 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10111 isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10112 "unexpected recipe found to be invariant");
10113 IsUniform = true;
10114 LastLane = 0;
10115 }
10116
10117 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10118 // Set the insert point after the last scalarized instruction or after the
10119 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10120 // will directly follow the scalar definitions.
10121 auto OldIP = Builder.saveIP();
10122 auto NewIP =
10123 isa<PHINode>(LastInst)
10124 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10125 : std::next(BasicBlock::iterator(LastInst));
10126 Builder.SetInsertPoint(&*NewIP);
10127
10128 // However, if we are vectorizing, we need to construct the vector values.
10129 // If the value is known to be uniform after vectorization, we can just
10130 // broadcast the scalar value corresponding to lane zero for each unroll
10131 // iteration. Otherwise, we construct the vector values using
10132 // insertelement instructions. Since the resulting vectors are stored in
10133 // State, we will only generate the insertelements once.
10134 Value *VectorValue = nullptr;
10135 if (IsUniform) {
10136 VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10137 set(Def, VectorValue, Part);
10138 } else {
10139 // Initialize packing with insertelements to start from poison.
10140 assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
10141 Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10142 set(Def, Poison, Part);
10143 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10144 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10145 VectorValue = get(Def, Part);
10146 }
10147 Builder.restoreIP(OldIP);
10148 return VectorValue;
10149 }
10150
10151 // Process the loop in the VPlan-native vectorization path. This path builds
10152 // VPlan upfront in the vectorization pipeline, which allows applying
10153 // VPlan-to-VPlan transformations from the very beginning without modifying the
10154 // input LLVM IR.
10155 static bool processLoopInVPlanNativePath(
10156 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10157 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10158 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10159 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10160 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10161 LoopVectorizationRequirements &Requirements) {
10162
10163 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10164 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10165 return false;
10166 }
10167 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10168 Function *F = L->getHeader()->getParent();
10169 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10170
10171 ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10172 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10173
10174 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10175 &Hints, IAI);
10176 // Use the planner for outer loop vectorization.
10177 // TODO: CM is not used at this point inside the planner. Turn CM into an
10178 // optional argument if we don't need it in the future.
10179 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10180 Requirements, ORE);
10181
10182 // Get user vectorization factor.
10183 ElementCount UserVF = Hints.getWidth();
10184
10185 CM.collectElementTypesForWidening();
10186
10187 // Plan how to best vectorize and return the best VF and its cost.
10188 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10189
10190 // If we are stress testing VPlan builds, do not attempt to generate vector
10191 // code. Masked vector code generation support will follow soon.
10192 // Also, do not attempt to vectorize if no vector code will be produced.
10193 if (VPlanBuildStressTest || EnableVPlanPredication ||
10194 VectorizationFactor::Disabled() == VF)
10195 return false;
10196
10197 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10198
10199 {
10200 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10201 F->getParent()->getDataLayout());
10202 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10203 &CM, BFI, PSI, Checks);
10204 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10205 << L->getHeader()->getParent()->getName() << "\"\n");
10206 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10207 }
10208
10209 // Mark the loop as already vectorized to avoid vectorizing again.
10210 Hints.setAlreadyVectorized();
10211 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10212 return true;
10213 }
10214
10215 // Emit a remark if there are stores to floats that required a floating point
10216 // extension. If the vectorized loop was generated with floating point, there
10217 // will be a performance penalty from the conversion overhead and the change in
10218 // the vector width.
10219 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10220 SmallVector<Instruction *, 4> Worklist;
10221 for (BasicBlock *BB : L->getBlocks()) {
10222 for (Instruction &Inst : *BB) {
10223 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10224 if (S->getValueOperand()->getType()->isFloatTy())
10225 Worklist.push_back(S);
10226 }
10227 }
10228 }
10229
10230 // Traverse the floating point stores upwards, searching for floating point
10231 // conversions.
10232 SmallPtrSet<const Instruction *, 4> Visited;
10233 SmallPtrSet<const Instruction *, 4> EmittedRemark;
10234 while (!Worklist.empty()) {
10235 auto *I = Worklist.pop_back_val();
10236 if (!L->contains(I))
10237 continue;
10238 if (!Visited.insert(I).second)
10239 continue;
10240
10241 // Emit a remark if the floating point store required a floating
10242 // point conversion.
10243 // TODO: More work could be done to identify the root cause such as a
10244 // constant or a function return type and point the user to it.
10245 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10246 ORE->emit([&]() {
10247 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10248 I->getDebugLoc(), L->getHeader())
10249 << "floating point conversion changes vector width. "
10250 << "Mixed floating point precision requires an up/down "
10251 << "cast that will negatively impact performance.";
10252 });
10253
10254 for (Use &Op : I->operands())
10255 if (auto *OpI = dyn_cast<Instruction>(Op))
10256 Worklist.push_back(OpI);
10257 }
10258 }
10259
10260 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10261 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10262 !EnableLoopInterleaving),
10263 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10264 !EnableLoopVectorization) {}
10265
10266 bool LoopVectorizePass::processLoop(Loop *L) {
10267 assert((EnableVPlanNativePath || L->isInnermost()) &&
10268 "VPlan-native path is not enabled.
Only process inner loops."); 10269 10270 #ifndef NDEBUG 10271 const std::string DebugLocStr = getDebugLocString(L); 10272 #endif /* NDEBUG */ 10273 10274 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10275 << L->getHeader()->getParent()->getName() << "' from " 10276 << DebugLocStr << "\n"); 10277 10278 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10279 10280 LLVM_DEBUG( 10281 dbgs() << "LV: Loop hints:" 10282 << " force=" 10283 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10284 ? "disabled" 10285 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10286 ? "enabled" 10287 : "?")) 10288 << " width=" << Hints.getWidth() 10289 << " interleave=" << Hints.getInterleave() << "\n"); 10290 10291 // Function containing loop 10292 Function *F = L->getHeader()->getParent(); 10293 10294 // Looking at the diagnostic output is the only way to determine if a loop 10295 // was vectorized (other than looking at the IR or machine code), so it 10296 // is important to generate an optimization remark for each loop. Most of 10297 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10298 // generated as OptimizationRemark and OptimizationRemarkMissed are 10299 // less verbose reporting vectorized loops and unvectorized loops that may 10300 // benefit from vectorization, respectively. 10301 10302 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10303 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10304 return false; 10305 } 10306 10307 PredicatedScalarEvolution PSE(*SE, *L); 10308 10309 // Check if it is legal to vectorize the loop. 10310 LoopVectorizationRequirements Requirements; 10311 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10312 &Requirements, &Hints, DB, AC, BFI, PSI); 10313 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10314 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10315 Hints.emitRemarkWithHints(); 10316 return false; 10317 } 10318 10319 // Check the function attributes and profiles to find out if this function 10320 // should be optimized for size. 10321 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10322 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10323 10324 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10325 // here. They may require CFG and instruction level transformations before 10326 // even evaluating whether vectorization is profitable. Since we cannot modify 10327 // the incoming IR, we need to build VPlan upfront in the vectorization 10328 // pipeline. 10329 if (!L->isInnermost()) 10330 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10331 ORE, BFI, PSI, Hints, Requirements); 10332 10333 assert(L->isInnermost() && "Inner loop expected."); 10334 10335 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10336 // count by optimizing for size, to minimize overheads. 10337 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10338 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10339 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10340 << "This loop is worth vectorizing only if no scalar " 10341 << "iteration overheads are incurred."); 10342 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10343 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10344 else { 10345 LLVM_DEBUG(dbgs() << "\n"); 10346 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10347 } 10348 } 10349 10350 // Check the function attributes to see if implicit floats are allowed. 10351 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10352 // an integer loop and the vector instructions selected are purely integer 10353 // vector instructions? 10354 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10355 reportVectorizationFailure( 10356 "Can't vectorize when the NoImplicitFloat attribute is used", 10357 "loop not vectorized due to NoImplicitFloat attribute", 10358 "NoImplicitFloat", ORE, L); 10359 Hints.emitRemarkWithHints(); 10360 return false; 10361 } 10362 10363 // Check if the target supports potentially unsafe FP vectorization. 10364 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10365 // for the target we're vectorizing for, to make sure none of the 10366 // additional fp-math flags can help. 10367 if (Hints.isPotentiallyUnsafe() && 10368 TTI->isFPVectorizationPotentiallyUnsafe()) { 10369 reportVectorizationFailure( 10370 "Potentially unsafe FP op prevents vectorization", 10371 "loop not vectorized due to unsafe FP support.", 10372 "UnsafeFP", ORE, L); 10373 Hints.emitRemarkWithHints(); 10374 return false; 10375 } 10376 10377 bool AllowOrderedReductions; 10378 // If the flag is set, use that instead and override the TTI behaviour. 10379 if (ForceOrderedReductions.getNumOccurrences() > 0) 10380 AllowOrderedReductions = ForceOrderedReductions; 10381 else 10382 AllowOrderedReductions = TTI->enableOrderedReductions(); 10383 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10384 ORE->emit([&]() { 10385 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10386 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10387 ExactFPMathInst->getDebugLoc(), 10388 ExactFPMathInst->getParent()) 10389 << "loop not vectorized: cannot prove it is safe to reorder " 10390 "floating-point operations"; 10391 }); 10392 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10393 "reorder floating-point operations\n"); 10394 Hints.emitRemarkWithHints(); 10395 return false; 10396 } 10397 10398 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10399 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10400 10401 // If an override option has been passed in for interleaved accesses, use it. 10402 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10403 UseInterleaved = EnableInterleavedMemAccesses; 10404 10405 // Analyze interleaved memory accesses. 10406 if (UseInterleaved) { 10407 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10408 } 10409 10410 // Use the cost model. 10411 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10412 F, &Hints, IAI); 10413 CM.collectValuesToIgnore(); 10414 CM.collectElementTypesForWidening(); 10415 10416 // Use the planner for vectorization. 10417 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10418 Requirements, ORE); 10419 10420 // Get user vectorization factor and interleave count. 
10421 ElementCount UserVF = Hints.getWidth();
10422 unsigned UserIC = Hints.getInterleave();
10423
10424 // Plan how to best vectorize and return the best VF and its cost.
10425 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10426
10427 VectorizationFactor VF = VectorizationFactor::Disabled();
10428 unsigned IC = 1;
10429
10430 if (MaybeVF) {
10431 VF = *MaybeVF;
10432 // Select the interleave count.
10433 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10434 }
10435
10436 // Identify the diagnostic messages that should be produced.
10437 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10438 bool VectorizeLoop = true, InterleaveLoop = true;
10439 if (VF.Width.isScalar()) {
10440 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10441 VecDiagMsg = std::make_pair(
10442 "VectorizationNotBeneficial",
10443 "the cost-model indicates that vectorization is not beneficial");
10444 VectorizeLoop = false;
10445 }
10446
10447 if (!MaybeVF && UserIC > 1) {
10448 // Tell the user interleaving was avoided up-front, despite being explicitly
10449 // requested.
10450 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10451 "interleaving should be avoided up front\n");
10452 IntDiagMsg = std::make_pair(
10453 "InterleavingAvoided",
10454 "Ignoring UserIC, because interleaving was avoided up front");
10455 InterleaveLoop = false;
10456 } else if (IC == 1 && UserIC <= 1) {
10457 // Tell the user interleaving is not beneficial.
10458 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10459 IntDiagMsg = std::make_pair(
10460 "InterleavingNotBeneficial",
10461 "the cost-model indicates that interleaving is not beneficial");
10462 InterleaveLoop = false;
10463 if (UserIC == 1) {
10464 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10465 IntDiagMsg.second +=
10466 " and is explicitly disabled or interleave count is set to 1";
10467 }
10468 } else if (IC > 1 && UserIC == 1) {
10469 // Tell the user interleaving is beneficial, but it is explicitly disabled.
10470 LLVM_DEBUG(
10471 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10472 IntDiagMsg = std::make_pair(
10473 "InterleavingBeneficialButDisabled",
10474 "the cost-model indicates that interleaving is beneficial "
10475 "but is explicitly disabled or interleave count is set to 1");
10476 InterleaveLoop = false;
10477 }
10478
10479 // Override IC if user provided an interleave count.
10480 IC = UserIC > 0 ? UserIC : IC;
10481
10482 // Emit diagnostic messages, if any.
10483 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10484 if (!VectorizeLoop && !InterleaveLoop) {
10485 // Do not vectorize or interleave the loop.
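// Emit both missed-optimization remarks (visible with, e.g.,
// -Rpass-missed=loop-vectorize under clang) before giving up on this loop.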
10486 ORE->emit([&]() {
10487 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10488 L->getStartLoc(), L->getHeader())
10489 << VecDiagMsg.second;
10490 });
10491 ORE->emit([&]() {
10492 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10493 L->getStartLoc(), L->getHeader())
10494 << IntDiagMsg.second;
10495 });
10496 return false;
10497 } else if (!VectorizeLoop && InterleaveLoop) {
10498 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10499 ORE->emit([&]() {
10500 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10501 L->getStartLoc(), L->getHeader())
10502 << VecDiagMsg.second;
10503 });
10504 } else if (VectorizeLoop && !InterleaveLoop) {
10505 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10506 << ") in " << DebugLocStr << '\n');
10507 ORE->emit([&]() {
10508 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10509 L->getStartLoc(), L->getHeader())
10510 << IntDiagMsg.second;
10511 });
10512 } else if (VectorizeLoop && InterleaveLoop) {
10513 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10514 << ") in " << DebugLocStr << '\n');
10515 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10516 }
10517
10518 bool DisableRuntimeUnroll = false;
10519 MDNode *OrigLoopID = L->getLoopID();
10520 {
10521 // Optimistically generate runtime checks. Drop them if they turn out to not
10522 // be profitable. Limit the scope of Checks, so the cleanup happens
10523 // immediately after vector code generation is done.
10524 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10525 F->getParent()->getDataLayout());
10526 if (!VF.Width.isScalar() || IC > 1)
10527 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10528
10529 using namespace ore;
10530 if (!VectorizeLoop) {
10531 assert(IC > 1 && "interleave count should not be 1 or 0");
10532 // If we decided not to vectorize the loop (the chosen VF is scalar),
10533 // then interleave it.
10534 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10535 &CM, BFI, PSI, Checks);
10536
10537 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10538 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10539
10540 ORE->emit([&]() {
10541 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10542 L->getHeader())
10543 << "interleaved loop (interleaved count: "
10544 << NV("InterleaveCount", IC) << ")";
10545 });
10546 } else {
10547 // If we decided that it is *legal* to vectorize the loop, then do it.
10548
10549 // Consider vectorizing the epilogue too if it's profitable.
10550 VectorizationFactor EpilogueVF =
10551 CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10552 if (EpilogueVF.Width.isVector()) {
10553
10554 // The first pass vectorizes the main loop and creates a scalar epilogue
10555 // to be vectorized by executing the plan (potentially with a different
10556 // factor) again shortly afterwards.
10557 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10558 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10559 EPI, &LVL, &CM, BFI, PSI, Checks);
10560
10561 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10562 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10563 DT);
10564 ++LoopsVectorized;
10565
10566 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10567 formLCSSARecursively(*L, *DT, LI, SE);
10568
10569 // Second pass vectorizes the epilogue and adjusts the control flow
10570 // edges from the first pass.
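// Reuse EPI's main-loop fields for the epilogue so the same executePlan
// machinery emits the epilogue loop at the epilogue VF/UF.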
10571 EPI.MainLoopVF = EPI.EpilogueVF; 10572 EPI.MainLoopUF = EPI.EpilogueUF; 10573 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10574 ORE, EPI, &LVL, &CM, BFI, PSI, 10575 Checks); 10576 10577 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10578 10579 // Ensure that the start values for any VPReductionPHIRecipes are 10580 // updated before vectorising the epilogue loop. 10581 VPBasicBlock *Header = 10582 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock(); 10583 for (VPRecipeBase &R : Header->phis()) { 10584 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10585 if (auto *Resume = MainILV.getReductionResumeValue( 10586 ReductionPhi->getRecurrenceDescriptor())) { 10587 VPValue *StartVal = new VPValue(Resume); 10588 BestEpiPlan.addExternalDef(StartVal); 10589 ReductionPhi->setOperand(0, StartVal); 10590 } 10591 } 10592 } 10593 10594 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10595 DT); 10596 ++LoopsEpilogueVectorized; 10597 10598 if (!MainILV.areSafetyChecksAdded()) 10599 DisableRuntimeUnroll = true; 10600 } else { 10601 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10602 &LVL, &CM, BFI, PSI, Checks); 10603 10604 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10605 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10606 ++LoopsVectorized; 10607 10608 // Add metadata to disable runtime unrolling a scalar loop when there 10609 // are no runtime checks about strides and memory. A scalar loop that is 10610 // rarely used is not worth unrolling. 10611 if (!LB.areSafetyChecksAdded()) 10612 DisableRuntimeUnroll = true; 10613 } 10614 // Report the vectorization decision. 10615 ORE->emit([&]() { 10616 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10617 L->getHeader()) 10618 << "vectorized loop (vectorization width: " 10619 << NV("VectorizationFactor", VF.Width) 10620 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10621 }); 10622 } 10623 10624 if (ORE->allowExtraAnalysis(LV_NAME)) 10625 checkMixedPrecision(L, ORE); 10626 } 10627 10628 Optional<MDNode *> RemainderLoopID = 10629 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10630 LLVMLoopVectorizeFollowupEpilogue}); 10631 if (RemainderLoopID.hasValue()) { 10632 L->setLoopID(RemainderLoopID.getValue()); 10633 } else { 10634 if (DisableRuntimeUnroll) 10635 AddRuntimeUnrollDisableMetaData(L); 10636 10637 // Mark the loop as already vectorized to avoid vectorizing again. 10638 Hints.setAlreadyVectorized(); 10639 } 10640 10641 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10642 return true; 10643 } 10644 10645 LoopVectorizeResult LoopVectorizePass::runImpl( 10646 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10647 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10648 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10649 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10650 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10651 SE = &SE_; 10652 LI = &LI_; 10653 TTI = &TTI_; 10654 DT = &DT_; 10655 BFI = &BFI_; 10656 TLI = TLI_; 10657 AA = &AA_; 10658 AC = &AC_; 10659 GetLAA = &GetLAA_; 10660 DB = &DB_; 10661 ORE = &ORE_; 10662 PSI = PSI_; 10663 10664 // Don't attempt if 10665 // 1. the target claims to have no vector registers, and 10666 // 2. interleaving won't help ILP. 
10667 //
10668 // The second condition is necessary because, even if the target has no
10669 // vector registers, loop vectorization may still enable scalar
10670 // interleaving.
10671 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10672 TTI->getMaxInterleaveFactor(1) < 2)
10673 return LoopVectorizeResult(false, false);
10674
10675 bool Changed = false, CFGChanged = false;
10676
10677 // The vectorizer requires loops to be in simplified form.
10678 // Since simplification may add new inner loops, it has to run before the
10679 // legality and profitability checks. This means running the loop vectorizer
10680 // will simplify all loops, regardless of whether anything ends up being
10681 // vectorized.
10682 for (auto &L : *LI)
10683 Changed |= CFGChanged |=
10684 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10685
10686 // Build up a worklist of inner-loops to vectorize. This is necessary as
10687 // the act of vectorizing or partially unrolling a loop creates new loops
10688 // and can invalidate iterators across the loops.
10689 SmallVector<Loop *, 8> Worklist;
10690
10691 for (Loop *L : *LI)
10692 collectSupportedLoops(*L, LI, ORE, Worklist);
10693
10694 LoopsAnalyzed += Worklist.size();
10695
10696 // Now walk the identified inner loops.
10697 while (!Worklist.empty()) {
10698 Loop *L = Worklist.pop_back_val();
10699
10700 // For the inner loops we actually process, form LCSSA to simplify the
10701 // transform.
10702 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10703
10704 Changed |= CFGChanged |= processLoop(L);
10705 }
10706
10707 // Process each loop nest in the function.
10708 return LoopVectorizeResult(Changed, CFGChanged);
10709 }
10710
10711 PreservedAnalyses LoopVectorizePass::run(Function &F,
10712 FunctionAnalysisManager &AM) {
10713 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10714 auto &LI = AM.getResult<LoopAnalysis>(F);
10715 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10716 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10717 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10718 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10719 auto &AA = AM.getResult<AAManager>(F);
10720 auto &AC = AM.getResult<AssumptionAnalysis>(F);
10721 auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10722 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10723
10724 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10725 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10726 [&](Loop &L) -> const LoopAccessInfo & {
10727 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
10728 TLI, TTI, nullptr, nullptr, nullptr};
10729 return LAM.getResult<LoopAccessAnalysis>(L, AR);
10730 };
10731 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10732 ProfileSummaryInfo *PSI =
10733 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10734 LoopVectorizeResult Result =
10735 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10736 if (!Result.MadeAnyChange)
10737 return PreservedAnalyses::all();
10738 PreservedAnalyses PA;
10739
10740 // We currently do not preserve loopinfo/dominator analyses with outer loop
10741 // vectorization. Until this is addressed, mark these analyses as preserved
10742 // only for non-VPlan-native path.
10743 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10744 if (!EnableVPlanNativePath) {
10745 PA.preserve<LoopAnalysis>();
10746 PA.preserve<DominatorTreeAnalysis>();
10747 }
10748
10749 if (Result.MadeCFGChange) {
10750 // Making CFG changes likely means a loop got vectorized. Indicate that
10751 // extra simplification passes should be run.
10752 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10753 // be run if runtime checks have been added.
10754 AM.getResult<ShouldRunExtraVectorPasses>(F);
10755 PA.preserve<ShouldRunExtraVectorPasses>();
10756 } else {
10757 PA.preserveSet<CFGAnalyses>();
10758 }
10759 return PA;
10760 }
10761
10762 void LoopVectorizePass::printPipeline(
10763 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10764 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10765 OS, MapClassName2PassName);
10766
10767 OS << "<";
10768 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10769 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10770 OS << ">";
10771 }
10772