//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one (see the illustration at the end
// of this header).
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
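//
// As a simple, illustrative example (not taken from the papers above), a loop
// such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten so that each iteration of the new loop processes
// VF consecutive elements with wide loads, adds and stores, and the induction
// variable advances by VF instead of by one:
//
//   for (int i = 0; i + VF <= n; i += VF)
//     a[i .. i+VF-1] = b[i .. i+VF-1] + c[i .. i+VF-1];  // one wide iteration
//
// Leftover iterations run in a scalar epilogue loop, or are handled by
// predication/tail folding (see below).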
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
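// As a rough illustration of the trade-off (numbers chosen for this comment
// only): with a trip count of 10 and VF = 4, a scalar-epilogue strategy runs
// 2 full vector iterations followed by 2 scalar remainder iterations, whereas
// tail folding runs 3 vector iterations where the last one is masked so that
// its two excess lanes are inactive, and no scalar remainder is needed.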
static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorisation of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
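// Illustrative note (assuming a typical x86 data layout): x86_fp80 has a type
// size of 80 bits but an allocation size of 96 or 128 bits, so an array of
// x86_fp80 values contains padding and is *not* bitcast-compatible with a
// vector of x86_fp80; hasIrregularType() returns true for such a type, while
// for i32 or float (allocation size == type size) it returns false.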
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop.
  /// Each value from the original loop, when vectorized, is represented by UF
  /// vector values in the new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p Ptr using the debug location in
  /// \p V. If \p Ptr is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop.
  /// Each value from the original loop, when scalarized, is represented by
  /// UF x VF scalar values in the new unrolled loop, where UF is the unroll
  /// factor and VF is the vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g.,
  /// epilogue vectorization) and the resume values can come from an additional
  /// bypass block, the \p AdditionalBypass pair provides information about the
  /// bypass block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists.
  /// Note that there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};
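// As a rough illustration (numbers chosen for this comment only): with
// MainLoopVF = 16, EpilogueVF = 8 and a trip count of 110, the main vector
// loop executes 6 iterations covering 96 elements, the vectorized epilogue
// executes 1 iteration covering 8 more, and the remaining 6 iterations run in
// the scalar remainder loop.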
/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (ie the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (ie the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
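// Illustrative note: for a fixed VF of 4 and Step = 2, createStepForVF folds
// to the constant 8; for a scalable VF of <vscale x 4> it emits the runtime
// value vscale * 8 via llvm.vscale. getRuntimeVF below behaves the same way
// with an implicit Step of 1.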
/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy =
      IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a VPWidenRecipe
  // or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
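  // Illustrative note: for a loop that loads i8 elements and accumulates them
  // into an i32 sum, this returns (8, 32). The smallest type matters when
  // -vectorizer-maximize-bandwidth is used, since the vectorization factor is
  // then chosen based on the narrowest element type in the loop.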
  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
1349 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1350 if (VF.isScalar())
1351 return true;
1352
1353 // Cost model is not run in the VPlan-native path - return conservative
1354 // result until this changes.
1355 if (EnableVPlanNativePath)
1356 return false;
1357
1358 auto UniformsPerVF = Uniforms.find(VF);
1359 assert(UniformsPerVF != Uniforms.end() &&
1360 "VF not yet analyzed for uniformity");
1361 return UniformsPerVF->second.count(I);
1362 }
1363
1364 /// Returns true if \p I is known to be scalar after vectorization.
1365 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1366 if (VF.isScalar())
1367 return true;
1368
1369 // Cost model is not run in the VPlan-native path - return conservative
1370 // result until this changes.
1371 if (EnableVPlanNativePath)
1372 return false;
1373
1374 auto ScalarsPerVF = Scalars.find(VF);
1375 assert(ScalarsPerVF != Scalars.end() &&
1376 "Scalar values are not calculated for VF");
1377 return ScalarsPerVF->second.count(I);
1378 }
1379
1380 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1381 /// for vectorization factor \p VF.
1382 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1383 return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1384 !isProfitableToScalarize(I, VF) &&
1385 !isScalarAfterVectorization(I, VF);
1386 }
1387
1388 /// Decision that was taken during cost calculation for memory instruction.
1389 enum InstWidening {
1390 CM_Unknown,
1391 CM_Widen, // For consecutive accesses with stride +1.
1392 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1393 CM_Interleave,
1394 CM_GatherScatter,
1395 CM_Scalarize
1396 };
1397
1398 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1399 /// instruction \p I and vector width \p VF.
1400 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1401 InstructionCost Cost) {
1402 assert(VF.isVector() && "Expected VF >=2");
1403 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1404 }
1405
1406 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1407 /// interleaving group \p Grp and vector width \p VF.
1408 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1409 ElementCount VF, InstWidening W,
1410 InstructionCost Cost) {
1411 assert(VF.isVector() && "Expected VF >=2");
1412 /// Broadcast this decision to all instructions inside the group.
1413 /// But the cost will be assigned to one instruction only.
1414 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1415 if (auto *I = Grp->getMember(i)) {
1416 if (Grp->getInsertPos() == I)
1417 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1418 else
1419 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1420 }
1421 }
1422 }
1423
1424 /// Return the cost model decision for the given instruction \p I and vector
1425 /// width \p VF. Return CM_Unknown if this instruction did not pass
1426 /// through the cost modeling.
1427 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1428 assert(VF.isVector() && "Expected VF to be a vector VF");
1429 // Cost model is not run in the VPlan-native path - return conservative
1430 // result until this changes.
1431 if (EnableVPlanNativePath) 1432 return CM_GatherScatter; 1433 1434 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1435 auto Itr = WideningDecisions.find(InstOnVF); 1436 if (Itr == WideningDecisions.end()) 1437 return CM_Unknown; 1438 return Itr->second.first; 1439 } 1440 1441 /// Return the vectorization cost for the given instruction \p I and vector 1442 /// width \p VF. 1443 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1444 assert(VF.isVector() && "Expected VF >=2"); 1445 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1446 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1447 "The cost is not calculated"); 1448 return WideningDecisions[InstOnVF].second; 1449 } 1450 1451 /// Return True if instruction \p I is an optimizable truncate whose operand 1452 /// is an induction variable. Such a truncate will be removed by adding a new 1453 /// induction variable with the destination type. 1454 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1455 // If the instruction is not a truncate, return false. 1456 auto *Trunc = dyn_cast<TruncInst>(I); 1457 if (!Trunc) 1458 return false; 1459 1460 // Get the source and destination types of the truncate. 1461 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1462 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1463 1464 // If the truncate is free for the given types, return false. Replacing a 1465 // free truncate with an induction variable would add an induction variable 1466 // update instruction to each iteration of the loop. We exclude from this 1467 // check the primary induction variable since it will need an update 1468 // instruction regardless. 1469 Value *Op = Trunc->getOperand(0); 1470 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1471 return false; 1472 1473 // If the truncated value is not an induction variable, return false. 1474 return Legal->isInductionPhi(Op); 1475 } 1476 1477 /// Collects the instructions to scalarize for each predicated instruction in 1478 /// the loop. 1479 void collectInstsToScalarize(ElementCount VF); 1480 1481 /// Collect Uniform and Scalar values for the given \p VF. 1482 /// The sets depend on CM decision for Load/Store instructions 1483 /// that may be vectorized as interleave, gather-scatter or scalarized. 1484 void collectUniformsAndScalars(ElementCount VF) { 1485 // Do the analysis once. 1486 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1487 return; 1488 setCostBasedWideningDecision(VF); 1489 collectLoopUniforms(VF); 1490 collectLoopScalars(VF); 1491 } 1492 1493 /// Returns true if the target machine supports masked store operation 1494 /// for the given \p DataType and kind of access to \p Ptr. 1495 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1496 return Legal->isConsecutivePtr(DataType, Ptr) && 1497 TTI.isLegalMaskedStore(DataType, Alignment); 1498 } 1499 1500 /// Returns true if the target machine supports masked load operation 1501 /// for the given \p DataType and kind of access to \p Ptr. 1502 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1503 return Legal->isConsecutivePtr(DataType, Ptr) && 1504 TTI.isLegalMaskedLoad(DataType, Alignment); 1505 } 1506 1507 /// Returns true if the target machine can represent \p V as a masked gather 1508 /// or scatter operation. 
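/// For example (editorial illustration): an indirect access such as A[B[i]]
/// has no consecutive pointer, so it can only be kept as a single vector
/// operation if the target reports a legal masked gather (for loads) or
/// masked scatter (for stores) of the widened type at the access alignment.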
1509 bool isLegalGatherOrScatter(Value *V, 1510 ElementCount VF = ElementCount::getFixed(1)) { 1511 bool LI = isa<LoadInst>(V); 1512 bool SI = isa<StoreInst>(V); 1513 if (!LI && !SI) 1514 return false; 1515 auto *Ty = getLoadStoreType(V); 1516 Align Align = getLoadStoreAlignment(V); 1517 if (VF.isVector()) 1518 Ty = VectorType::get(Ty, VF); 1519 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1520 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1521 } 1522 1523 /// Returns true if the target machine supports all of the reduction 1524 /// variables found for the given VF. 1525 bool canVectorizeReductions(ElementCount VF) const { 1526 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1527 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1528 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1529 })); 1530 } 1531 1532 /// Returns true if \p I is an instruction that will be scalarized with 1533 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1534 /// instructions include conditional stores and instructions that may divide 1535 /// by zero. 1536 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1537 1538 // Returns true if \p I is an instruction that will be predicated either 1539 // through scalar predication or masked load/store or masked gather/scatter. 1540 // \p VF is the vectorization factor that will be used to vectorize \p I. 1541 // Superset of instructions that return true for isScalarWithPredication. 1542 bool isPredicatedInst(Instruction *I, ElementCount VF, 1543 bool IsKnownUniform = false) { 1544 // When we know the load is uniform and the original scalar loop was not 1545 // predicated we don't need to mark it as a predicated instruction. Any 1546 // vectorised blocks created when tail-folding are something artificial we 1547 // have introduced and we know there is always at least one active lane. 1548 // That's why we call Legal->blockNeedsPredication here because it doesn't 1549 // query tail-folding. 1550 if (IsKnownUniform && isa<LoadInst>(I) && 1551 !Legal->blockNeedsPredication(I->getParent())) 1552 return false; 1553 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1554 return false; 1555 // Loads and stores that need some form of masked operation are predicated 1556 // instructions. 1557 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1558 return Legal->isMaskRequired(I); 1559 return isScalarWithPredication(I, VF); 1560 } 1561 1562 /// Returns true if \p I is a memory instruction with consecutive memory 1563 /// access that can be widened. 1564 bool 1565 memoryInstructionCanBeWidened(Instruction *I, 1566 ElementCount VF = ElementCount::getFixed(1)); 1567 1568 /// Returns true if \p I is a memory instruction in an interleaved-group 1569 /// of memory accesses that can be vectorized with wide vector loads/stores 1570 /// and shuffles. 1571 bool 1572 interleavedAccessCanBeWidened(Instruction *I, 1573 ElementCount VF = ElementCount::getFixed(1)); 1574 1575 /// Check if \p Instr belongs to any interleaved access group. 1576 bool isAccessInterleaved(Instruction *Instr) { 1577 return InterleaveInfo.isInterleaved(Instr); 1578 } 1579 1580 /// Get the interleaved access group that \p Instr belongs to. 1581 const InterleaveGroup<Instruction> * 1582 getInterleavedAccessGroup(Instruction *Instr) { 1583 return InterleaveInfo.getInterleaveGroup(Instr); 1584 } 1585 1586 /// Returns true if we're required to use a scalar epilogue for at least 1587 /// the final iteration of the original loop. 
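/// For example (editorial illustration): this holds when the loop can exit
/// from a block other than the latch, or when an interleave group has gaps
/// that make the final vector iteration unsafe to execute; see the checks in
/// the function body below.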
1588 bool requiresScalarEpilogue(ElementCount VF) const { 1589 if (!isScalarEpilogueAllowed()) 1590 return false; 1591 // If we might exit from anywhere but the latch, must run the exiting 1592 // iteration in scalar form. 1593 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1594 return true; 1595 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1596 } 1597 1598 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1599 /// loop hint annotation. 1600 bool isScalarEpilogueAllowed() const { 1601 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1602 } 1603 1604 /// Returns true if all loop blocks should be masked to fold tail loop. 1605 bool foldTailByMasking() const { return FoldTailByMasking; } 1606 1607 /// Returns true if the instructions in this block requires predication 1608 /// for any reason, e.g. because tail folding now requires a predicate 1609 /// or because the block in the original loop was predicated. 1610 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const { 1611 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1612 } 1613 1614 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1615 /// nodes to the chain of instructions representing the reductions. Uses a 1616 /// MapVector to ensure deterministic iteration order. 1617 using ReductionChainMap = 1618 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1619 1620 /// Return the chain of instructions representing an inloop reduction. 1621 const ReductionChainMap &getInLoopReductionChains() const { 1622 return InLoopReductionChains; 1623 } 1624 1625 /// Returns true if the Phi is part of an inloop reduction. 1626 bool isInLoopReduction(PHINode *Phi) const { 1627 return InLoopReductionChains.count(Phi); 1628 } 1629 1630 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1631 /// with factor VF. Return the cost of the instruction, including 1632 /// scalarization overhead if it's needed. 1633 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1634 1635 /// Estimate cost of a call instruction CI if it were vectorized with factor 1636 /// VF. Return the cost of the instruction, including scalarization overhead 1637 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1638 /// scalarized - 1639 /// i.e. either vector version isn't available, or is too expensive. 1640 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1641 bool &NeedToScalarize) const; 1642 1643 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1644 /// that of B. 1645 bool isMoreProfitable(const VectorizationFactor &A, 1646 const VectorizationFactor &B) const; 1647 1648 /// Invalidates decisions already taken by the cost model. 1649 void invalidateCostModelingDecisions() { 1650 WideningDecisions.clear(); 1651 Uniforms.clear(); 1652 Scalars.clear(); 1653 } 1654 1655 private: 1656 unsigned NumPredStores = 0; 1657 1658 /// Convenience function that returns the value of vscale_range iff 1659 /// vscale_range.min == vscale_range.max or otherwise returns the value 1660 /// returned by the corresponding TLI method. 1661 Optional<unsigned> getVScaleForTuning() const; 1662 1663 /// \return An upper bound for the vectorization factors for both 1664 /// fixed and scalable vectorization, where the minimum-known number of 1665 /// elements is a power-of-2 larger than zero. 
If scalable vectorization is
1666 /// disabled or unsupported, then the scalable part will be equal to
1667 /// ElementCount::getScalable(0).
1668 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1669 ElementCount UserVF,
1670 bool FoldTailByMasking);
1671
1672 /// \return the maximized element count based on the target's vector
1673 /// registers and the loop trip-count, but limited to a maximum safe VF.
1674 /// This is a helper function of computeFeasibleMaxVF.
1675 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1676 /// issue that occurred on one of the buildbots which cannot be reproduced
1677 /// without having access to the proprietary compiler (see comments on
1678 /// D98509). The issue is currently under investigation and this workaround
1679 /// will be removed as soon as possible.
1680 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1681 unsigned SmallestType,
1682 unsigned WidestType,
1683 const ElementCount &MaxSafeVF,
1684 bool FoldTailByMasking);
1685
1686 /// \return the maximum legal scalable VF, based on the safe max number
1687 /// of elements.
1688 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1689
1690 /// The vectorization cost is a combination of the cost itself and a boolean
1691 /// indicating whether any of the contributing operations will actually
1692 /// operate on vector values after type legalization in the backend. If this
1693 /// latter value is false, then all operations will be scalarized (i.e. no
1694 /// vectorization has actually taken place).
1695 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1696
1697 /// Returns the expected execution cost. The unit of the cost does
1698 /// not matter because we use the 'cost' units to compare different
1699 /// vector widths. The cost that is returned is *not* normalized by
1700 /// the factor width. If \p Invalid is not nullptr, this function
1701 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1702 /// each instruction that has an Invalid cost for the given VF.
1703 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1704 VectorizationCostTy
1705 expectedCost(ElementCount VF,
1706 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1707
1708 /// Returns the execution time cost of an instruction for a given vector
1709 /// width. Vector width of one means scalar.
1710 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1711
1712 /// The cost-computation logic from getInstructionCost which provides
1713 /// the vector type as an output parameter.
1714 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1715 Type *&VectorTy);
1716
1717 /// Return the cost of instructions in an inloop reduction pattern, if I is
1718 /// part of that pattern.
1719 Optional<InstructionCost>
1720 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1721 TTI::TargetCostKind CostKind);
1722
1723 /// Calculate vectorization cost of memory instruction \p I.
1724 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1725
1726 /// The cost computation for scalarized memory instruction.
1727 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1728
1729 /// The cost computation for interleaving group of memory instructions.
1730 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1731
1732 /// The cost computation for Gather/Scatter instruction.
1733 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1734
1735 /// The cost computation for widening instruction \p I with consecutive
1736 /// memory access.
1737 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1738
1739 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1740 /// Load: scalar load + broadcast.
1741 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1742 /// element)
1743 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1744
1745 /// Estimate the overhead of scalarizing an instruction. This is a
1746 /// convenience wrapper for the type-based getScalarizationOverhead API.
1747 InstructionCost getScalarizationOverhead(Instruction *I,
1748 ElementCount VF) const;
1749
1750 /// Returns whether the instruction is a load or store and will be emitted
1751 /// as a vector operation.
1752 bool isConsecutiveLoadOrStore(Instruction *I);
1753
1754 /// Returns true if an artificially high cost for emulated masked memrefs
1755 /// should be used.
1756 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1757
1758 /// Map of scalar integer values to the smallest bitwidth they can be legally
1759 /// represented as. The vector equivalents of these values should be truncated
1760 /// to this type.
1761 MapVector<Instruction *, uint64_t> MinBWs;
1762
1763 /// A type representing the costs for instructions if they were to be
1764 /// scalarized rather than vectorized. The entries are Instruction-Cost
1765 /// pairs.
1766 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1767
1768 /// A set containing all BasicBlocks that are known to be present after
1769 /// vectorization as a predicated block.
1770 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1771
1772 /// Records whether it is allowed to have the original scalar loop execute at
1773 /// least once. This may be needed as a fallback loop in case runtime
1774 /// aliasing/dependence checks fail, or to handle the tail/remainder
1775 /// iterations when the trip count is unknown or doesn't divide by the VF,
1776 /// or as a peel-loop to handle gaps in interleave-groups.
1777 /// Under optsize and when the trip count is very small we don't allow any
1778 /// iterations to execute in the scalar loop.
1779 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1780
1781 /// All blocks of loop are to be masked to fold tail of scalar iterations.
1782 bool FoldTailByMasking = false;
1783
1784 /// A map holding scalar costs for different vectorization factors. The
1785 /// presence of a cost for an instruction in the mapping indicates that the
1786 /// instruction will be scalarized when vectorizing with the associated
1787 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1788 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1789
1790 /// Holds the instructions known to be uniform after vectorization.
1791 /// The data is collected per VF.
1792 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1793
1794 /// Holds the instructions known to be scalar after vectorization.
1795 /// The data is collected per VF.
1796 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1797
1798 /// Holds the instructions (address computations) that are forced to be
1799 /// scalarized.
1800 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1801 1802 /// PHINodes of the reductions that should be expanded in-loop along with 1803 /// their associated chains of reduction operations, in program order from top 1804 /// (PHI) to bottom 1805 ReductionChainMap InLoopReductionChains; 1806 1807 /// A Map of inloop reduction operations and their immediate chain operand. 1808 /// FIXME: This can be removed once reductions can be costed correctly in 1809 /// vplan. This was added to allow quick lookup to the inloop operations, 1810 /// without having to loop through InLoopReductionChains. 1811 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1812 1813 /// Returns the expected difference in cost from scalarizing the expression 1814 /// feeding a predicated instruction \p PredInst. The instructions to 1815 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1816 /// non-negative return value implies the expression will be scalarized. 1817 /// Currently, only single-use chains are considered for scalarization. 1818 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1819 ElementCount VF); 1820 1821 /// Collect the instructions that are uniform after vectorization. An 1822 /// instruction is uniform if we represent it with a single scalar value in 1823 /// the vectorized loop corresponding to each vector iteration. Examples of 1824 /// uniform instructions include pointer operands of consecutive or 1825 /// interleaved memory accesses. Note that although uniformity implies an 1826 /// instruction will be scalar, the reverse is not true. In general, a 1827 /// scalarized instruction will be represented by VF scalar values in the 1828 /// vectorized loop, each corresponding to an iteration of the original 1829 /// scalar loop. 1830 void collectLoopUniforms(ElementCount VF); 1831 1832 /// Collect the instructions that are scalar after vectorization. An 1833 /// instruction is scalar if it is known to be uniform or will be scalarized 1834 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1835 /// to the list if they are used by a load/store instruction that is marked as 1836 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1837 /// VF values in the vectorized loop, each corresponding to an iteration of 1838 /// the original scalar loop. 1839 void collectLoopScalars(ElementCount VF); 1840 1841 /// Keeps cost model vectorization decision and cost for instructions. 1842 /// Right now it is used for memory instructions only. 1843 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1844 std::pair<InstWidening, InstructionCost>>; 1845 1846 DecisionList WideningDecisions; 1847 1848 /// Returns true if \p V is expected to be vectorized and it needs to be 1849 /// extracted. 1850 bool needsExtract(Value *V, ElementCount VF) const { 1851 Instruction *I = dyn_cast<Instruction>(V); 1852 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1853 TheLoop->isLoopInvariant(I)) 1854 return false; 1855 1856 // Assume we can vectorize V (and hence we need extraction) if the 1857 // scalars are not computed yet. This can happen, because it is called 1858 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1859 // the scalars are collected. That should be a safe assumption in most 1860 // cases, because we check if the operands have vectorizable types 1861 // beforehand in LoopVectorizationLegality. 
1862 return Scalars.find(VF) == Scalars.end() || 1863 !isScalarAfterVectorization(I, VF); 1864 }; 1865 1866 /// Returns a range containing only operands needing to be extracted. 1867 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1868 ElementCount VF) const { 1869 return SmallVector<Value *, 4>(make_filter_range( 1870 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1871 } 1872 1873 /// Determines if we have the infrastructure to vectorize loop \p L and its 1874 /// epilogue, assuming the main loop is vectorized by \p VF. 1875 bool isCandidateForEpilogueVectorization(const Loop &L, 1876 const ElementCount VF) const; 1877 1878 /// Returns true if epilogue vectorization is considered profitable, and 1879 /// false otherwise. 1880 /// \p VF is the vectorization factor chosen for the original loop. 1881 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1882 1883 public: 1884 /// The loop that we evaluate. 1885 Loop *TheLoop; 1886 1887 /// Predicated scalar evolution analysis. 1888 PredicatedScalarEvolution &PSE; 1889 1890 /// Loop Info analysis. 1891 LoopInfo *LI; 1892 1893 /// Vectorization legality. 1894 LoopVectorizationLegality *Legal; 1895 1896 /// Vector target information. 1897 const TargetTransformInfo &TTI; 1898 1899 /// Target Library Info. 1900 const TargetLibraryInfo *TLI; 1901 1902 /// Demanded bits analysis. 1903 DemandedBits *DB; 1904 1905 /// Assumption cache. 1906 AssumptionCache *AC; 1907 1908 /// Interface to emit optimization remarks. 1909 OptimizationRemarkEmitter *ORE; 1910 1911 const Function *TheFunction; 1912 1913 /// Loop Vectorize Hint. 1914 const LoopVectorizeHints *Hints; 1915 1916 /// The interleave access information contains groups of interleaved accesses 1917 /// with the same stride and close to each other. 1918 InterleavedAccessInfo &InterleaveInfo; 1919 1920 /// Values to ignore in the cost model. 1921 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1922 1923 /// Values to ignore in the cost model when VF > 1. 1924 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1925 1926 /// All element types found in the loop. 1927 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1928 1929 /// Profitable vector factors. 1930 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1931 }; 1932 } // end namespace llvm 1933 1934 /// Helper struct to manage generating runtime checks for vectorization. 1935 /// 1936 /// The runtime checks are created up-front in temporary blocks to allow better 1937 /// estimating the cost and un-linked from the existing IR. After deciding to 1938 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1939 /// temporary blocks are completely removed. 1940 class GeneratedRTChecks { 1941 /// Basic block which contains the generated SCEV checks, if any. 1942 BasicBlock *SCEVCheckBlock = nullptr; 1943 1944 /// The value representing the result of the generated SCEV checks. If it is 1945 /// nullptr, either no SCEV checks have been generated or they have been used. 1946 Value *SCEVCheckCond = nullptr; 1947 1948 /// Basic block which contains the generated memory runtime checks, if any. 1949 BasicBlock *MemCheckBlock = nullptr; 1950 1951 /// The value representing the result of the generated memory runtime checks. 1952 /// If it is nullptr, either no memory runtime checks have been generated or 1953 /// they have been used. 
1954 Value *MemRuntimeCheckCond = nullptr; 1955 1956 DominatorTree *DT; 1957 LoopInfo *LI; 1958 1959 SCEVExpander SCEVExp; 1960 SCEVExpander MemCheckExp; 1961 1962 public: 1963 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1964 const DataLayout &DL) 1965 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1966 MemCheckExp(SE, DL, "scev.check") {} 1967 1968 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1969 /// accurately estimate the cost of the runtime checks. The blocks are 1970 /// un-linked from the IR and is added back during vector code generation. If 1971 /// there is no vector code generation, the check blocks are removed 1972 /// completely. 1973 void Create(Loop *L, const LoopAccessInfo &LAI, 1974 const SCEVPredicate &Pred) { 1975 1976 BasicBlock *LoopHeader = L->getHeader(); 1977 BasicBlock *Preheader = L->getLoopPreheader(); 1978 1979 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1980 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1981 // may be used by SCEVExpander. The blocks will be un-linked from their 1982 // predecessors and removed from LI & DT at the end of the function. 1983 if (!Pred.isAlwaysTrue()) { 1984 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1985 nullptr, "vector.scevcheck"); 1986 1987 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1988 &Pred, SCEVCheckBlock->getTerminator()); 1989 } 1990 1991 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1992 if (RtPtrChecking.Need) { 1993 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1994 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1995 "vector.memcheck"); 1996 1997 MemRuntimeCheckCond = 1998 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 1999 RtPtrChecking.getChecks(), MemCheckExp); 2000 assert(MemRuntimeCheckCond && 2001 "no RT checks generated although RtPtrChecking " 2002 "claimed checks are required"); 2003 } 2004 2005 if (!MemCheckBlock && !SCEVCheckBlock) 2006 return; 2007 2008 // Unhook the temporary block with the checks, update various places 2009 // accordingly. 2010 if (SCEVCheckBlock) 2011 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2012 if (MemCheckBlock) 2013 MemCheckBlock->replaceAllUsesWith(Preheader); 2014 2015 if (SCEVCheckBlock) { 2016 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2017 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2018 Preheader->getTerminator()->eraseFromParent(); 2019 } 2020 if (MemCheckBlock) { 2021 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2022 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2023 Preheader->getTerminator()->eraseFromParent(); 2024 } 2025 2026 DT->changeImmediateDominator(LoopHeader, Preheader); 2027 if (MemCheckBlock) { 2028 DT->eraseNode(MemCheckBlock); 2029 LI->removeBlock(MemCheckBlock); 2030 } 2031 if (SCEVCheckBlock) { 2032 DT->eraseNode(SCEVCheckBlock); 2033 LI->removeBlock(SCEVCheckBlock); 2034 } 2035 } 2036 2037 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2038 /// unused. 
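/// A check counts as "used" once emitSCEVChecks or emitMemRuntimeChecks has
/// wired its block into the final control flow; those helpers then reset the
/// corresponding *Cond member to nullptr so the destructor keeps the block.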
2039 ~GeneratedRTChecks() { 2040 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2041 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2042 if (!SCEVCheckCond) 2043 SCEVCleaner.markResultUsed(); 2044 2045 if (!MemRuntimeCheckCond) 2046 MemCheckCleaner.markResultUsed(); 2047 2048 if (MemRuntimeCheckCond) { 2049 auto &SE = *MemCheckExp.getSE(); 2050 // Memory runtime check generation creates compares that use expanded 2051 // values. Remove them before running the SCEVExpanderCleaners. 2052 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2053 if (MemCheckExp.isInsertedInstruction(&I)) 2054 continue; 2055 SE.forgetValue(&I); 2056 I.eraseFromParent(); 2057 } 2058 } 2059 MemCheckCleaner.cleanup(); 2060 SCEVCleaner.cleanup(); 2061 2062 if (SCEVCheckCond) 2063 SCEVCheckBlock->eraseFromParent(); 2064 if (MemRuntimeCheckCond) 2065 MemCheckBlock->eraseFromParent(); 2066 } 2067 2068 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2069 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2070 /// depending on the generated condition. 2071 BasicBlock *emitSCEVChecks(BasicBlock *Bypass, 2072 BasicBlock *LoopVectorPreHeader, 2073 BasicBlock *LoopExitBlock) { 2074 if (!SCEVCheckCond) 2075 return nullptr; 2076 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2077 if (C->isZero()) 2078 return nullptr; 2079 2080 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2081 2082 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2083 // Create new preheader for vector loop. 2084 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2085 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2086 2087 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2088 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2089 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2090 SCEVCheckBlock); 2091 2092 DT->addNewBlock(SCEVCheckBlock, Pred); 2093 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2094 2095 ReplaceInstWithInst( 2096 SCEVCheckBlock->getTerminator(), 2097 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2098 // Mark the check as used, to prevent it from being removed during cleanup. 2099 SCEVCheckCond = nullptr; 2100 return SCEVCheckBlock; 2101 } 2102 2103 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2104 /// the branches to branch to the vector preheader or \p Bypass, depending on 2105 /// the generated condition. 2106 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass, 2107 BasicBlock *LoopVectorPreHeader) { 2108 // Check if we generated code that checks in runtime if arrays overlap. 2109 if (!MemRuntimeCheckCond) 2110 return nullptr; 2111 2112 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2113 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2114 MemCheckBlock); 2115 2116 DT->addNewBlock(MemCheckBlock, Pred); 2117 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2118 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2119 2120 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2121 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2122 2123 ReplaceInstWithInst( 2124 MemCheckBlock->getTerminator(), 2125 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2126 MemCheckBlock->getTerminator()->setDebugLoc( 2127 Pred->getTerminator()->getDebugLoc()); 2128 2129 // Mark the check as used, to prevent it from being removed during cleanup. 
2130 MemRuntimeCheckCond = nullptr; 2131 return MemCheckBlock; 2132 } 2133 }; 2134 2135 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2136 // vectorization. The loop needs to be annotated with #pragma omp simd 2137 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2138 // vector length information is not provided, vectorization is not considered 2139 // explicit. Interleave hints are not allowed either. These limitations will be 2140 // relaxed in the future. 2141 // Please, note that we are currently forced to abuse the pragma 'clang 2142 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2143 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2144 // provides *explicit vectorization hints* (LV can bypass legal checks and 2145 // assume that vectorization is legal). However, both hints are implemented 2146 // using the same metadata (llvm.loop.vectorize, processed by 2147 // LoopVectorizeHints). This will be fixed in the future when the native IR 2148 // representation for pragma 'omp simd' is introduced. 2149 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2150 OptimizationRemarkEmitter *ORE) { 2151 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2152 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2153 2154 // Only outer loops with an explicit vectorization hint are supported. 2155 // Unannotated outer loops are ignored. 2156 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2157 return false; 2158 2159 Function *Fn = OuterLp->getHeader()->getParent(); 2160 if (!Hints.allowVectorization(Fn, OuterLp, 2161 true /*VectorizeOnlyWhenForced*/)) { 2162 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2163 return false; 2164 } 2165 2166 if (Hints.getInterleave() > 1) { 2167 // TODO: Interleave support is future work. 2168 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2169 "outer loops.\n"); 2170 Hints.emitRemarkWithHints(); 2171 return false; 2172 } 2173 2174 return true; 2175 } 2176 2177 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2178 OptimizationRemarkEmitter *ORE, 2179 SmallVectorImpl<Loop *> &V) { 2180 // Collect inner loops and outer loops without irreducible control flow. For 2181 // now, only collect outer loops that have explicit vectorization hints. If we 2182 // are stress testing the VPlan H-CFG construction, we collect the outermost 2183 // loop of every loop nest. 2184 if (L.isInnermost() || VPlanBuildStressTest || 2185 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2186 LoopBlocksRPO RPOT(&L); 2187 RPOT.perform(LI); 2188 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2189 V.push_back(&L); 2190 // TODO: Collect inner loops inside marked outer loops in case 2191 // vectorization fails for the outer loop. Do not invoke 2192 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2193 // already known to be reducible. We can use an inherited attribute for 2194 // that. 2195 return; 2196 } 2197 } 2198 for (Loop *InnerL : L) 2199 collectSupportedLoops(*InnerL, LI, ORE, V); 2200 } 2201 2202 namespace { 2203 2204 /// The LoopVectorize Pass. 
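/// This is the legacy pass-manager wrapper around LoopVectorizePass; it can
/// be exercised with, e.g., `opt -loop-vectorize` under the legacy pass
/// manager or `opt -passes=loop-vectorize` under the new one.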
2205 struct LoopVectorize : public FunctionPass { 2206 /// Pass identification, replacement for typeid 2207 static char ID; 2208 2209 LoopVectorizePass Impl; 2210 2211 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2212 bool VectorizeOnlyWhenForced = false) 2213 : FunctionPass(ID), 2214 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2215 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2216 } 2217 2218 bool runOnFunction(Function &F) override { 2219 if (skipFunction(F)) 2220 return false; 2221 2222 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2223 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2224 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2225 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2226 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2227 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2228 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2229 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2230 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2231 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2232 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2233 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2234 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2235 2236 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2237 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2238 2239 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2240 GetLAA, *ORE, PSI).MadeAnyChange; 2241 } 2242 2243 void getAnalysisUsage(AnalysisUsage &AU) const override { 2244 AU.addRequired<AssumptionCacheTracker>(); 2245 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2246 AU.addRequired<DominatorTreeWrapperPass>(); 2247 AU.addRequired<LoopInfoWrapperPass>(); 2248 AU.addRequired<ScalarEvolutionWrapperPass>(); 2249 AU.addRequired<TargetTransformInfoWrapperPass>(); 2250 AU.addRequired<AAResultsWrapperPass>(); 2251 AU.addRequired<LoopAccessLegacyAnalysis>(); 2252 AU.addRequired<DemandedBitsWrapperPass>(); 2253 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2254 AU.addRequired<InjectTLIMappingsLegacy>(); 2255 2256 // We currently do not preserve loopinfo/dominator analyses with outer loop 2257 // vectorization. Until this is addressed, mark these analyses as preserved 2258 // only for non-VPlan-native path. 2259 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2260 if (!EnableVPlanNativePath) { 2261 AU.addPreserved<LoopInfoWrapperPass>(); 2262 AU.addPreserved<DominatorTreeWrapperPass>(); 2263 } 2264 2265 AU.addPreserved<BasicAAWrapperPass>(); 2266 AU.addPreserved<GlobalsAAWrapperPass>(); 2267 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2268 } 2269 }; 2270 2271 } // end anonymous namespace 2272 2273 //===----------------------------------------------------------------------===// 2274 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2275 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2276 //===----------------------------------------------------------------------===// 2277 2278 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2279 // We need to place the broadcast of invariant variables outside the loop, 2280 // but only if it's proven safe to do so. Else, broadcast will be inside 2281 // vector loop body. 
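// Roughly, for a loop-invariant i32 %x and VF = 4 the splat created below
// expands to the following IR (sketch), placed in the vector preheader when
// hoisting is safe:
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer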
2282 Instruction *Instr = dyn_cast<Instruction>(V); 2283 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2284 (!Instr || 2285 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2286 // Place the code for broadcasting invariant variables in the new preheader. 2287 IRBuilder<>::InsertPointGuard Guard(Builder); 2288 if (SafeToHoist) 2289 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2290 2291 // Broadcast the scalar into all locations in the vector. 2292 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2293 2294 return Shuf; 2295 } 2296 2297 /// This function adds 2298 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2299 /// to each vector element of Val. The sequence starts at StartIndex. 2300 /// \p Opcode is relevant for FP induction variable. 2301 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2302 Instruction::BinaryOps BinOp, ElementCount VF, 2303 IRBuilderBase &Builder) { 2304 assert(VF.isVector() && "only vector VFs are supported"); 2305 2306 // Create and check the types. 2307 auto *ValVTy = cast<VectorType>(Val->getType()); 2308 ElementCount VLen = ValVTy->getElementCount(); 2309 2310 Type *STy = Val->getType()->getScalarType(); 2311 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2312 "Induction Step must be an integer or FP"); 2313 assert(Step->getType() == STy && "Step has wrong type"); 2314 2315 SmallVector<Constant *, 8> Indices; 2316 2317 // Create a vector of consecutive numbers from zero to VF. 2318 VectorType *InitVecValVTy = ValVTy; 2319 if (STy->isFloatingPointTy()) { 2320 Type *InitVecValSTy = 2321 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2322 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2323 } 2324 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2325 2326 // Splat the StartIdx 2327 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2328 2329 if (STy->isIntegerTy()) { 2330 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2331 Step = Builder.CreateVectorSplat(VLen, Step); 2332 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2333 // FIXME: The newly created binary instructions should contain nsw/nuw 2334 // flags, which can be found from the original scalar operations. 2335 Step = Builder.CreateMul(InitVec, Step); 2336 return Builder.CreateAdd(Val, Step, "induction"); 2337 } 2338 2339 // Floating point induction. 2340 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2341 "Binary Opcode should be specified for FP induction"); 2342 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2343 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2344 2345 Step = Builder.CreateVectorSplat(VLen, Step); 2346 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2347 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2348 } 2349 2350 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2351 /// variable on which to base the steps, \p Step is the size of the step. 2352 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2353 const InductionDescriptor &ID, VPValue *Def, 2354 VPTransformState &State) { 2355 IRBuilderBase &Builder = State.Builder; 2356 // We shouldn't have to build scalar steps if we aren't vectorizing. 2357 assert(State.VF.isVector() && "VF should be greater than one"); 2358 // Get the value type and ensure it and the step have the same integer type. 
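#if 0
// Editorial sketch, not part of the pass (plain integers only): the value this
// function assigns to lane Lane of unroll part Part is
//   ScalarIV + (Part * VF + Lane) * Step,
// matching what getStepVector above computes vector-wide.
#include <cstdint>
constexpr int64_t scalarStep(int64_t ScalarIV, int64_t Step, unsigned VF,
                             unsigned Part, unsigned Lane) {
  return ScalarIV + int64_t(Part * VF + Lane) * Step;
}
static_assert(scalarStep(0, 2, 4, 1, 3) == 14, "lane 3 of part 1, step 2");
#endif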
2359 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2360 assert(ScalarIVTy == Step->getType() && 2361 "Val and Step should have the same type"); 2362 2363 // We build scalar steps for both integer and floating-point induction 2364 // variables. Here, we determine the kind of arithmetic we will perform. 2365 Instruction::BinaryOps AddOp; 2366 Instruction::BinaryOps MulOp; 2367 if (ScalarIVTy->isIntegerTy()) { 2368 AddOp = Instruction::Add; 2369 MulOp = Instruction::Mul; 2370 } else { 2371 AddOp = ID.getInductionOpcode(); 2372 MulOp = Instruction::FMul; 2373 } 2374 2375 // Determine the number of scalars we need to generate for each unroll 2376 // iteration. 2377 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2378 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2379 // Compute the scalar steps and save the results in State. 2380 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2381 ScalarIVTy->getScalarSizeInBits()); 2382 Type *VecIVTy = nullptr; 2383 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2384 if (!FirstLaneOnly && State.VF.isScalable()) { 2385 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2386 UnitStepVec = 2387 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2388 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2389 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2390 } 2391 2392 for (unsigned Part = 0; Part < State.UF; ++Part) { 2393 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2394 2395 if (!FirstLaneOnly && State.VF.isScalable()) { 2396 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2397 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2398 if (ScalarIVTy->isFloatingPointTy()) 2399 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2400 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2401 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2402 State.set(Def, Add, Part); 2403 // It's useful to record the lane values too for the known minimum number 2404 // of elements so we do those below. This improves the code quality when 2405 // trying to extract the first element, for example. 2406 } 2407 2408 if (ScalarIVTy->isFloatingPointTy()) 2409 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2410 2411 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2412 Value *StartIdx = Builder.CreateBinOp( 2413 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2414 // The step returned by `createStepForVF` is a runtime-evaluated value 2415 // when VF is scalable. Otherwise, it should be folded into a Constant. 2416 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2417 "Expected StartIdx to be folded to a constant when VF is not " 2418 "scalable"); 2419 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2420 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2421 State.set(Def, Add, VPIteration(Part, Lane)); 2422 } 2423 } 2424 } 2425 2426 // Generate code for the induction step. 
Note that induction steps are 2427 // required to be loop-invariant 2428 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE, 2429 Instruction *InsertBefore, 2430 Loop *OrigLoop = nullptr) { 2431 const DataLayout &DL = SE.getDataLayout(); 2432 assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) && 2433 "Induction step should be loop invariant"); 2434 if (auto *E = dyn_cast<SCEVUnknown>(Step)) 2435 return E->getValue(); 2436 2437 SCEVExpander Exp(SE, DL, "induction"); 2438 return Exp.expandCodeFor(Step, Step->getType(), InsertBefore); 2439 } 2440 2441 /// Compute the transformed value of Index at offset StartValue using step 2442 /// StepValue. 2443 /// For integer induction, returns StartValue + Index * StepValue. 2444 /// For pointer induction, returns StartValue[Index * StepValue]. 2445 /// FIXME: The newly created binary instructions should contain nsw/nuw 2446 /// flags, which can be found from the original scalar operations. 2447 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index, 2448 Value *StartValue, Value *Step, 2449 const InductionDescriptor &ID) { 2450 assert(Index->getType()->getScalarType() == Step->getType() && 2451 "Index scalar type does not match StepValue type"); 2452 2453 // Note: the IR at this point is broken. We cannot use SE to create any new 2454 // SCEV and then expand it, hoping that SCEV's simplification will give us 2455 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2456 // lead to various SCEV crashes. So all we can do is to use builder and rely 2457 // on InstCombine for future simplifications. Here we handle some trivial 2458 // cases only. 2459 auto CreateAdd = [&B](Value *X, Value *Y) { 2460 assert(X->getType() == Y->getType() && "Types don't match!"); 2461 if (auto *CX = dyn_cast<ConstantInt>(X)) 2462 if (CX->isZero()) 2463 return Y; 2464 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2465 if (CY->isZero()) 2466 return X; 2467 return B.CreateAdd(X, Y); 2468 }; 2469 2470 // We allow X to be a vector type, in which case Y will potentially be 2471 // splatted into a vector with the same element count. 
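// (This arises, for example, for the pointer induction case below, where
// Index can already be a step vector while Step is still a scalar.)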
2472 auto CreateMul = [&B](Value *X, Value *Y) { 2473 assert(X->getType()->getScalarType() == Y->getType() && 2474 "Types don't match!"); 2475 if (auto *CX = dyn_cast<ConstantInt>(X)) 2476 if (CX->isOne()) 2477 return Y; 2478 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2479 if (CY->isOne()) 2480 return X; 2481 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2482 if (XVTy && !isa<VectorType>(Y->getType())) 2483 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2484 return B.CreateMul(X, Y); 2485 }; 2486 2487 switch (ID.getKind()) { 2488 case InductionDescriptor::IK_IntInduction: { 2489 assert(!isa<VectorType>(Index->getType()) && 2490 "Vector indices not supported for integer inductions yet"); 2491 assert(Index->getType() == StartValue->getType() && 2492 "Index type does not match StartValue type"); 2493 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2494 return B.CreateSub(StartValue, Index); 2495 auto *Offset = CreateMul(Index, Step); 2496 return CreateAdd(StartValue, Offset); 2497 } 2498 case InductionDescriptor::IK_PtrInduction: { 2499 assert(isa<Constant>(Step) && 2500 "Expected constant step for pointer induction"); 2501 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2502 } 2503 case InductionDescriptor::IK_FpInduction: { 2504 assert(!isa<VectorType>(Index->getType()) && 2505 "Vector indices not supported for FP inductions yet"); 2506 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2507 auto InductionBinOp = ID.getInductionBinOp(); 2508 assert(InductionBinOp && 2509 (InductionBinOp->getOpcode() == Instruction::FAdd || 2510 InductionBinOp->getOpcode() == Instruction::FSub) && 2511 "Original bin op should be defined for FP induction"); 2512 2513 Value *MulExp = B.CreateFMul(Step, Index); 2514 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2515 "induction"); 2516 } 2517 case InductionDescriptor::IK_NoInduction: 2518 return nullptr; 2519 } 2520 llvm_unreachable("invalid enum"); 2521 } 2522 2523 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2524 const VPIteration &Instance, 2525 VPTransformState &State) { 2526 Value *ScalarInst = State.get(Def, Instance); 2527 Value *VectorValue = State.get(Def, Instance.Part); 2528 VectorValue = Builder.CreateInsertElement( 2529 VectorValue, ScalarInst, 2530 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2531 State.set(Def, VectorValue, Instance.Part); 2532 } 2533 2534 // Return whether we allow using masked interleave-groups (for dealing with 2535 // strided loads/stores that reside in predicated blocks, or for dealing 2536 // with gaps). 2537 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2538 // If an override option has been passed in for interleaved accesses, use it. 2539 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2540 return EnableMaskedInterleavedMemAccesses; 2541 2542 return TTI.enableMaskedInterleavedAccessVectorization(); 2543 } 2544 2545 // Try to vectorize the interleave group that \p Instr belongs to. 2546 // 2547 // E.g. Translate following interleaved load group (factor = 3): 2548 // for (i = 0; i < N; i+=3) { 2549 // R = Pic[i]; // Member of index 0 2550 // G = Pic[i+1]; // Member of index 1 2551 // B = Pic[i+2]; // Member of index 2 2552 // ... 
// do something to R, G, B 2553 // } 2554 // To: 2555 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2556 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2557 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2558 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2559 // 2560 // Or translate following interleaved store group (factor = 3): 2561 // for (i = 0; i < N; i+=3) { 2562 // ... do something to R, G, B 2563 // Pic[i] = R; // Member of index 0 2564 // Pic[i+1] = G; // Member of index 1 2565 // Pic[i+2] = B; // Member of index 2 2566 // } 2567 // To: 2568 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2569 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2570 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2571 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2572 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2573 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2574 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2575 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2576 VPValue *BlockInMask) { 2577 Instruction *Instr = Group->getInsertPos(); 2578 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2579 2580 // Prepare for the vector type of the interleaved load/store. 2581 Type *ScalarTy = getLoadStoreType(Instr); 2582 unsigned InterleaveFactor = Group->getFactor(); 2583 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2584 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2585 2586 // Prepare for the new pointers. 2587 SmallVector<Value *, 2> AddrParts; 2588 unsigned Index = Group->getIndex(Instr); 2589 2590 // TODO: extend the masked interleaved-group support to reversed access. 2591 assert((!BlockInMask || !Group->isReverse()) && 2592 "Reversed masked interleave-group not supported."); 2593 2594 // If the group is reverse, adjust the index to refer to the last vector lane 2595 // instead of the first. We adjust the index from the first vector lane, 2596 // rather than directly getting the pointer for lane VF - 1, because the 2597 // pointer operand of the interleaved access is supposed to be uniform. For 2598 // uniform instructions, we're only required to generate a value for the 2599 // first vector lane in each unroll iteration. 2600 if (Group->isReverse()) 2601 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2602 2603 for (unsigned Part = 0; Part < UF; Part++) { 2604 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2605 setDebugLocFromInst(AddrPart); 2606 2607 // Notice current instruction could be any index. Need to adjust the address 2608 // to the member of index 0. 2609 // 2610 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2611 // b = A[i]; // Member of index 0 2612 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2613 // 2614 // E.g. A[i+1] = a; // Member of index 1 2615 // A[i] = b; // Member of index 0 2616 // A[i+2] = c; // Member of index 2 (Current instruction) 2617 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2618 2619 bool InBounds = false; 2620 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2621 InBounds = gep->isInBounds(); 2622 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2623 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2624 2625 // Cast to the vector pointer type. 
2626 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2627 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2628 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2629 } 2630 2631 setDebugLocFromInst(Instr); 2632 Value *PoisonVec = PoisonValue::get(VecTy); 2633 2634 Value *MaskForGaps = nullptr; 2635 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2636 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2637 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2638 } 2639 2640 // Vectorize the interleaved load group. 2641 if (isa<LoadInst>(Instr)) { 2642 // For each unroll part, create a wide load for the group. 2643 SmallVector<Value *, 2> NewLoads; 2644 for (unsigned Part = 0; Part < UF; Part++) { 2645 Instruction *NewLoad; 2646 if (BlockInMask || MaskForGaps) { 2647 assert(useMaskedInterleavedAccesses(*TTI) && 2648 "masked interleaved groups are not allowed."); 2649 Value *GroupMask = MaskForGaps; 2650 if (BlockInMask) { 2651 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2652 Value *ShuffledMask = Builder.CreateShuffleVector( 2653 BlockInMaskPart, 2654 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2655 "interleaved.mask"); 2656 GroupMask = MaskForGaps 2657 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2658 MaskForGaps) 2659 : ShuffledMask; 2660 } 2661 NewLoad = 2662 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2663 GroupMask, PoisonVec, "wide.masked.vec"); 2664 } 2665 else 2666 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2667 Group->getAlign(), "wide.vec"); 2668 Group->addMetadata(NewLoad); 2669 NewLoads.push_back(NewLoad); 2670 } 2671 2672 // For each member in the group, shuffle out the appropriate data from the 2673 // wide loads. 2674 unsigned J = 0; 2675 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2676 Instruction *Member = Group->getMember(I); 2677 2678 // Skip the gaps in the group. 2679 if (!Member) 2680 continue; 2681 2682 auto StrideMask = 2683 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2684 for (unsigned Part = 0; Part < UF; Part++) { 2685 Value *StridedVec = Builder.CreateShuffleVector( 2686 NewLoads[Part], StrideMask, "strided.vec"); 2687 2688 // If this member has different type, cast the result type. 2689 if (Member->getType() != ScalarTy) { 2690 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2691 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2692 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2693 } 2694 2695 if (Group->isReverse()) 2696 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2697 2698 State.set(VPDefs[J], StridedVec, Part); 2699 } 2700 ++J; 2701 } 2702 return; 2703 } 2704 2705 // The sub vector type for current instruction. 2706 auto *SubVT = VectorType::get(ScalarTy, VF); 2707 2708 // Vectorize the interleaved store group. 2709 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2710 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2711 "masked interleaved groups are not allowed."); 2712 assert((!MaskForGaps || !VF.isScalable()) && 2713 "masking gaps for scalable vectors is not yet supported."); 2714 for (unsigned Part = 0; Part < UF; Part++) { 2715 // Collect the stored vector from each member. 
2716 SmallVector<Value *, 4> StoredVecs; 2717 for (unsigned i = 0; i < InterleaveFactor; i++) { 2718 assert((Group->getMember(i) || MaskForGaps) && 2719 "Fail to get a member from an interleaved store group"); 2720 Instruction *Member = Group->getMember(i); 2721 2722 // Skip the gaps in the group. 2723 if (!Member) { 2724 Value *Undef = PoisonValue::get(SubVT); 2725 StoredVecs.push_back(Undef); 2726 continue; 2727 } 2728 2729 Value *StoredVec = State.get(StoredValues[i], Part); 2730 2731 if (Group->isReverse()) 2732 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2733 2734 // If this member has different type, cast it to a unified type. 2735 2736 if (StoredVec->getType() != SubVT) 2737 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2738 2739 StoredVecs.push_back(StoredVec); 2740 } 2741 2742 // Concatenate all vectors into a wide vector. 2743 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2744 2745 // Interleave the elements in the wide vector. 2746 Value *IVec = Builder.CreateShuffleVector( 2747 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2748 "interleaved.vec"); 2749 2750 Instruction *NewStoreInstr; 2751 if (BlockInMask || MaskForGaps) { 2752 Value *GroupMask = MaskForGaps; 2753 if (BlockInMask) { 2754 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2755 Value *ShuffledMask = Builder.CreateShuffleVector( 2756 BlockInMaskPart, 2757 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2758 "interleaved.mask"); 2759 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2760 ShuffledMask, MaskForGaps) 2761 : ShuffledMask; 2762 } 2763 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2764 Group->getAlign(), GroupMask); 2765 } else 2766 NewStoreInstr = 2767 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2768 2769 Group->addMetadata(NewStoreInstr); 2770 } 2771 } 2772 2773 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2774 VPReplicateRecipe *RepRecipe, 2775 const VPIteration &Instance, 2776 bool IfPredicateInstr, 2777 VPTransformState &State) { 2778 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2779 2780 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2781 // the first lane and part. 2782 if (isa<NoAliasScopeDeclInst>(Instr)) 2783 if (!Instance.isFirstIteration()) 2784 return; 2785 2786 setDebugLocFromInst(Instr); 2787 2788 // Does this instruction return a value ? 2789 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2790 2791 Instruction *Cloned = Instr->clone(); 2792 if (!IsVoidRetTy) 2793 Cloned->setName(Instr->getName() + ".cloned"); 2794 2795 // If the scalarized instruction contributes to the address computation of a 2796 // widen masked load/store which was in a basic block that needed predication 2797 // and is not predicated after vectorization, we can't propagate 2798 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2799 // instruction could feed a poison value to the base address of the widen 2800 // load/store. 2801 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2802 Cloned->dropPoisonGeneratingFlags(); 2803 2804 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 2805 Builder.GetInsertPoint()); 2806 // Replace the operands of the cloned instructions with their scalar 2807 // equivalents in the new loop. 
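  // For example (a sketch with made-up numbers): with VF == 4 and UF == 2,
  // Instance (Part == 1, Lane == 2) picks, for each operand, the scalar copy
  // that was generated for lane 2 of the second unrolled part; operands
  // defined by a uniform replicate recipe only have a lane-0 copy, so the
  // lookup below is redirected to the first lane for those.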
2808 for (auto &I : enumerate(RepRecipe->operands())) { 2809 auto InputInstance = Instance; 2810 VPValue *Operand = I.value(); 2811 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2812 if (OperandR && OperandR->isUniform()) 2813 InputInstance.Lane = VPLane::getFirstLane(); 2814 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2815 } 2816 addNewMetadata(Cloned, Instr); 2817 2818 // Place the cloned scalar in the new loop. 2819 Builder.Insert(Cloned); 2820 2821 State.set(RepRecipe, Cloned, Instance); 2822 2823 // If we just cloned a new assumption, add it the assumption cache. 2824 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2825 AC->registerAssumption(II); 2826 2827 // End if-block. 2828 if (IfPredicateInstr) 2829 PredicatedInstructions.push_back(Cloned); 2830 } 2831 2832 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2833 if (TripCount) 2834 return TripCount; 2835 2836 assert(InsertBlock); 2837 IRBuilder<> Builder(InsertBlock->getTerminator()); 2838 // Find the loop boundaries. 2839 ScalarEvolution *SE = PSE.getSE(); 2840 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2841 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2842 "Invalid loop count"); 2843 2844 Type *IdxTy = Legal->getWidestInductionType(); 2845 assert(IdxTy && "No type for induction"); 2846 2847 // The exit count might have the type of i64 while the phi is i32. This can 2848 // happen if we have an induction variable that is sign extended before the 2849 // compare. The only way that we get a backedge taken count is that the 2850 // induction variable was signed and as such will not overflow. In such a case 2851 // truncation is legal. 2852 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2853 IdxTy->getPrimitiveSizeInBits()) 2854 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2855 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2856 2857 // Get the total trip count from the count by adding 1. 2858 const SCEV *ExitCount = SE->getAddExpr( 2859 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2860 2861 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2862 2863 // Expand the trip count and place the new instructions in the preheader. 2864 // Notice that the pre-header does not change, only the loop body. 2865 SCEVExpander Exp(*SE, DL, "induction"); 2866 2867 // Count holds the overall loop count (N). 2868 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2869 InsertBlock->getTerminator()); 2870 2871 if (TripCount->getType()->isPointerTy()) 2872 TripCount = 2873 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2874 InsertBlock->getTerminator()); 2875 2876 return TripCount; 2877 } 2878 2879 Value * 2880 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2881 if (VectorTripCount) 2882 return VectorTripCount; 2883 2884 Value *TC = getOrCreateTripCount(InsertBlock); 2885 IRBuilder<> Builder(InsertBlock->getTerminator()); 2886 2887 Type *Ty = TC->getType(); 2888 // This is where we can make the step a runtime constant. 2889 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2890 2891 // If the tail is to be folded by masking, round the number of iterations N 2892 // up to a multiple of Step instead of rounding down. This is done by first 2893 // adding Step-1 and then rounding down. 
Note that it's ok if this addition 2894 // overflows: the vector induction variable will eventually wrap to zero given 2895 // that it starts at zero and its Step is a power of two; the loop will then 2896 // exit, with the last early-exit vector comparison also producing all-true. 2897 if (Cost->foldTailByMasking()) { 2898 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2899 "VF*UF must be a power of 2 when folding tail by masking"); 2900 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2901 TC = Builder.CreateAdd( 2902 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2903 } 2904 2905 // Now we need to generate the expression for the part of the loop that the 2906 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2907 // iterations are not required for correctness, or N - Step, otherwise. Step 2908 // is equal to the vectorization factor (number of SIMD elements) times the 2909 // unroll factor (number of SIMD instructions). 2910 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2911 2912 // There are cases where we *must* run at least one iteration in the remainder 2913 // loop. See the cost model for when this can happen. If the step evenly 2914 // divides the trip count, we set the remainder to be equal to the step. If 2915 // the step does not evenly divide the trip count, no adjustment is necessary 2916 // since there will already be scalar iterations. Note that the minimum 2917 // iterations check ensures that N >= Step. 2918 if (Cost->requiresScalarEpilogue(VF)) { 2919 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2920 R = Builder.CreateSelect(IsZero, Step, R); 2921 } 2922 2923 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2924 2925 return VectorTripCount; 2926 } 2927 2928 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2929 const DataLayout &DL) { 2930 // Verify that V is a vector type with same number of elements as DstVTy. 2931 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2932 unsigned VF = DstFVTy->getNumElements(); 2933 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2934 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2935 Type *SrcElemTy = SrcVecTy->getElementType(); 2936 Type *DstElemTy = DstFVTy->getElementType(); 2937 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2938 "Vector elements must have same size"); 2939 2940 // Do a direct cast if element types are castable. 2941 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2942 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2943 } 2944 // V cannot be directly casted to desired vector type. 2945 // May happen when V is a floating point vector but DstVTy is a vector of 2946 // pointers or vice-versa. Handle this using a two-step bitcast using an 2947 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
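  // E.g. (illustrative, assuming 64-bit pointers on the target): casting
  // <4 x double> to <4 x i8*> is emitted as
  //   <4 x double>  --bitcast-->  <4 x i64>  --inttoptr-->  <4 x i8*>
  // because a direct bitcast between floating-point and pointer vectors is
  // not a valid no-op cast.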
2948 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2949 "Only one type should be a pointer type"); 2950 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2951 "Only one type should be a floating point type"); 2952 Type *IntTy = 2953 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2954 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2955 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2956 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2957 } 2958 2959 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) { 2960 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2961 // Reuse existing vector loop preheader for TC checks. 2962 // Note that new preheader block is generated for vector loop. 2963 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2964 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2965 2966 // Generate code to check if the loop's trip count is less than VF * UF, or 2967 // equal to it in case a scalar epilogue is required; this implies that the 2968 // vector trip count is zero. This check also covers the case where adding one 2969 // to the backedge-taken count overflowed leading to an incorrect trip count 2970 // of zero. In this case we will also jump to the scalar loop. 2971 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2972 : ICmpInst::ICMP_ULT; 2973 2974 // If tail is to be folded, vector loop takes care of all iterations. 2975 Value *CheckMinIters = Builder.getFalse(); 2976 if (!Cost->foldTailByMasking()) { 2977 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 2978 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 2979 } 2980 // Create new preheader for vector loop. 2981 LoopVectorPreHeader = 2982 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2983 "vector.ph"); 2984 2985 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2986 DT->getNode(Bypass)->getIDom()) && 2987 "TC check is expected to dominate Bypass"); 2988 2989 // Update dominator for Bypass & LoopExit (if needed). 2990 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2991 if (!Cost->requiresScalarEpilogue(VF)) 2992 // If there is an epilogue which must run, there's no edge from the 2993 // middle block to exit blocks and thus no need to update the immediate 2994 // dominator of the exit blocks. 2995 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2996 2997 ReplaceInstWithInst( 2998 TCCheckBlock->getTerminator(), 2999 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3000 LoopBypassBlocks.push_back(TCCheckBlock); 3001 } 3002 3003 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3004 3005 BasicBlock *const SCEVCheckBlock = 3006 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3007 if (!SCEVCheckBlock) 3008 return nullptr; 3009 3010 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3011 (OptForSizeBasedOnProfile && 3012 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3013 "Cannot SCEV check stride or overflow when optimizing for size"); 3014 3015 3016 // Update dominator only if this is first RT check. 
3017 if (LoopBypassBlocks.empty()) { 3018 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3019 if (!Cost->requiresScalarEpilogue(VF)) 3020 // If there is an epilogue which must run, there's no edge from the 3021 // middle block to exit blocks and thus no need to update the immediate 3022 // dominator of the exit blocks. 3023 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3024 } 3025 3026 LoopBypassBlocks.push_back(SCEVCheckBlock); 3027 AddedSafetyChecks = true; 3028 return SCEVCheckBlock; 3029 } 3030 3031 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) { 3032 // VPlan-native path does not do any analysis for runtime checks currently. 3033 if (EnableVPlanNativePath) 3034 return nullptr; 3035 3036 BasicBlock *const MemCheckBlock = 3037 RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader); 3038 3039 // Check if we generated code that checks in runtime if arrays overlap. We put 3040 // the checks into a separate block to make the more common case of few 3041 // elements faster. 3042 if (!MemCheckBlock) 3043 return nullptr; 3044 3045 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3046 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3047 "Cannot emit memory checks when optimizing for size, unless forced " 3048 "to vectorize."); 3049 ORE->emit([&]() { 3050 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3051 OrigLoop->getStartLoc(), 3052 OrigLoop->getHeader()) 3053 << "Code-size may be reduced by not forcing " 3054 "vectorization, or by source-code modifications " 3055 "eliminating the need for runtime checks " 3056 "(e.g., adding 'restrict')."; 3057 }); 3058 } 3059 3060 LoopBypassBlocks.push_back(MemCheckBlock); 3061 3062 AddedSafetyChecks = true; 3063 3064 // We currently don't use LoopVersioning for the actual loop cloning but we 3065 // still use it to add the noalias metadata. 3066 LVer = std::make_unique<LoopVersioning>( 3067 *Legal->getLAI(), 3068 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3069 DT, PSE.getSE()); 3070 LVer->prepareNoAliasMetadata(); 3071 return MemCheckBlock; 3072 } 3073 3074 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3075 LoopScalarBody = OrigLoop->getHeader(); 3076 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3077 assert(LoopVectorPreHeader && "Invalid loop structure"); 3078 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3079 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3080 "multiple exit loop without required epilogue?"); 3081 3082 LoopMiddleBlock = 3083 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3084 LI, nullptr, Twine(Prefix) + "middle.block"); 3085 LoopScalarPreHeader = 3086 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3087 nullptr, Twine(Prefix) + "scalar.ph"); 3088 3089 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3090 3091 // Set up the middle block terminator. Two cases: 3092 // 1) If we know that we must execute the scalar epilogue, emit an 3093 // unconditional branch. 3094 // 2) Otherwise, we must have a single unique exit block (due to how we 3095 // implement the multiple exit case). In this case, set up a conditonal 3096 // branch from the middle block to the loop scalar preheader, and the 3097 // exit block. completeLoopSkeleton will update the condition to use an 3098 // iteration check, if required to decide whether to execute the remainder. 
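  // As a rough sketch of the two cases (block and value names are only
  // illustrative):
  //   1) middle.block:
  //        br label %scalar.ph
  //   2) middle.block:
  //        br i1 true, label %exit.block, label %scalar.ph
  //      where the placeholder 'true' condition is later replaced by %cmp.n
  //      in completeLoopSkeleton, if an iteration check is required.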
3099 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 3100 BranchInst::Create(LoopScalarPreHeader) : 3101 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3102 Builder.getTrue()); 3103 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3104 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3105 3106 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, LI, 3107 nullptr, Twine(Prefix) + "vector.body"); 3108 3109 // Update dominator for loop exit. 3110 if (!Cost->requiresScalarEpilogue(VF)) 3111 // If there is an epilogue which must run, there's no edge from the 3112 // middle block to exit blocks and thus no need to update the immediate 3113 // dominator of the exit blocks. 3114 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3115 } 3116 3117 void InnerLoopVectorizer::createInductionResumeValues( 3118 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3119 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3120 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3121 "Inconsistent information about additional bypass."); 3122 3123 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3124 assert(VectorTripCount && "Expected valid arguments"); 3125 // We are going to resume the execution of the scalar loop. 3126 // Go over all of the induction variables that we found and fix the 3127 // PHIs that are left in the scalar version of the loop. 3128 // The starting values of PHI nodes depend on the counter of the last 3129 // iteration in the vectorized loop. 3130 // If we come from a bypass edge then we need to start from the original 3131 // start value. 3132 Instruction *OldInduction = Legal->getPrimaryInduction(); 3133 for (auto &InductionEntry : Legal->getInductionVars()) { 3134 PHINode *OrigPhi = InductionEntry.first; 3135 InductionDescriptor II = InductionEntry.second; 3136 3137 // Create phi nodes to merge from the backedge-taken check block. 3138 PHINode *BCResumeVal = 3139 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3140 LoopScalarPreHeader->getTerminator()); 3141 // Copy original phi DL over to the new one. 3142 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3143 Value *&EndValue = IVEndValues[OrigPhi]; 3144 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3145 if (OrigPhi == OldInduction) { 3146 // We know what the end value is. 3147 EndValue = VectorTripCount; 3148 } else { 3149 IRBuilder<> B(LoopVectorPreHeader->getTerminator()); 3150 3151 // Fast-math-flags propagate from the original induction instruction. 3152 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3153 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3154 3155 Type *StepType = II.getStep()->getType(); 3156 Instruction::CastOps CastOp = 3157 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3158 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3159 Value *Step = 3160 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3161 EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3162 EndValue->setName("ind.end"); 3163 3164 // Compute the end value for the additional bypass (if applicable). 
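      // For example (made-up numbers): for an induction starting at 2 with
      // step 3 and a vector trip count of 8, the end value computed above is
      // ind.end = 2 + 8 * 3 == 26; the additional-bypass value below is
      // derived in the same way, just from the bypass block's trip count.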
3165 if (AdditionalBypass.first) { 3166 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3167 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3168 StepType, true); 3169 Value *Step = 3170 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3171 CRD = 3172 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3173 EndValueFromAdditionalBypass = 3174 emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3175 EndValueFromAdditionalBypass->setName("ind.end"); 3176 } 3177 } 3178 // The new PHI merges the original incoming value, in case of a bypass, 3179 // or the value at the end of the vectorized loop. 3180 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3181 3182 // Fix the scalar body counter (PHI node). 3183 // The old induction's phi node in the scalar body needs the truncated 3184 // value. 3185 for (BasicBlock *BB : LoopBypassBlocks) 3186 BCResumeVal->addIncoming(II.getStartValue(), BB); 3187 3188 if (AdditionalBypass.first) 3189 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3190 EndValueFromAdditionalBypass); 3191 3192 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3193 } 3194 } 3195 3196 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3197 // The trip counts should be cached by now. 3198 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3199 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3200 3201 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3202 3203 // Add a check in the middle block to see if we have completed 3204 // all of the iterations in the first vector loop. Three cases: 3205 // 1) If we require a scalar epilogue, there is no conditional branch as 3206 // we unconditionally branch to the scalar preheader. Do nothing. 3207 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3208 // Thus if tail is to be folded, we know we don't need to run the 3209 // remainder and we can use the previous value for the condition (true). 3210 // 3) Otherwise, construct a runtime check. 3211 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3212 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3213 Count, VectorTripCount, "cmp.n", 3214 LoopMiddleBlock->getTerminator()); 3215 3216 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3217 // of the corresponding compare because they may have ended up with 3218 // different line numbers and we want to avoid awkward line stepping while 3219 // debugging. Eg. if the compare has got a line number inside the loop. 3220 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3221 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3222 } 3223 3224 #ifdef EXPENSIVE_CHECKS 3225 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3226 #endif 3227 3228 return LoopVectorPreHeader; 3229 } 3230 3231 std::pair<BasicBlock *, Value *> 3232 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3233 /* 3234 In this function we generate a new loop. The new loop will contain 3235 the vectorized instructions while the old loop will continue to run the 3236 scalar remainder. 3237 3238 [ ] <-- loop iteration number check. 3239 / | 3240 / v 3241 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3242 | / | 3243 | / v 3244 || [ ] <-- vector pre header. 3245 |/ | 3246 | v 3247 | [ ] \ 3248 | [ ]_| <-- vector loop. 
3249 | | 3250 | v 3251 \ -[ ] <--- middle-block. 3252 \/ | 3253 /\ v 3254 | ->[ ] <--- new preheader. 3255 | | 3256 (opt) v <-- edge from middle to exit iff epilogue is not required. 3257 | [ ] \ 3258 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3259 \ | 3260 \ v 3261 >[ ] <-- exit block(s). 3262 ... 3263 */ 3264 3265 // Get the metadata of the original loop before it gets modified. 3266 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3267 3268 // Workaround! Compute the trip count of the original loop and cache it 3269 // before we start modifying the CFG. This code has a systemic problem 3270 // wherein it tries to run analysis over partially constructed IR; this is 3271 // wrong, and not simply for SCEV. The trip count of the original loop 3272 // simply happens to be prone to hitting this in practice. In theory, we 3273 // can hit the same issue for any SCEV, or ValueTracking query done during 3274 // mutation. See PR49900. 3275 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3276 3277 // Create an empty vector loop, and prepare basic blocks for the runtime 3278 // checks. 3279 createVectorLoopSkeleton(""); 3280 3281 // Now, compare the new count to zero. If it is zero skip the vector loop and 3282 // jump to the scalar loop. This check also covers the case where the 3283 // backedge-taken count is uint##_max: adding one to it will overflow leading 3284 // to an incorrect trip count of zero. In this (rare) case we will also jump 3285 // to the scalar loop. 3286 emitMinimumIterationCountCheck(LoopScalarPreHeader); 3287 3288 // Generate the code to check any assumptions that we've made for SCEV 3289 // expressions. 3290 emitSCEVChecks(LoopScalarPreHeader); 3291 3292 // Generate the code that checks in runtime if arrays overlap. We put the 3293 // checks into a separate block to make the more common case of few elements 3294 // faster. 3295 emitMemRuntimeChecks(LoopScalarPreHeader); 3296 3297 // Emit phis for the new starting index of the scalar loop. 3298 createInductionResumeValues(); 3299 3300 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3301 } 3302 3303 // Fix up external users of the induction variable. At this point, we are 3304 // in LCSSA form, with all external PHIs that use the IV having one input value, 3305 // coming from the remainder loop. We need those PHIs to also have a correct 3306 // value for the IV when arriving directly from the middle block. 3307 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3308 const InductionDescriptor &II, 3309 Value *CountRoundDown, Value *EndValue, 3310 BasicBlock *MiddleBlock, 3311 BasicBlock *VectorHeader) { 3312 // There are two kinds of external IV usages - those that use the value 3313 // computed in the last iteration (the PHI) and those that use the penultimate 3314 // value (the value that feeds into the phi from the loop latch). 3315 // We allow both, but they, obviously, have different values. 3316 3317 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3318 3319 DenseMap<Value *, Value *> MissingVals; 3320 3321 // An external user of the last iteration's value should see the value that 3322 // the remainder loop uses to initialize its own IV. 
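  // For instance (shorthand C, illustrative):
  //   for (i = 0; i < n; ++i) { last = i; }
  //   ... use of i ...     // sees the post-increment value, i.e. EndValue
  //   ... use of last ...  // sees the penultimate value, EndValue - Step
  // The first kind is handled right below; the second kind is handled by the
  // loop that follows it.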
3323   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3324   for (User *U : PostInc->users()) {
3325     Instruction *UI = cast<Instruction>(U);
3326     if (!OrigLoop->contains(UI)) {
3327       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3328       MissingVals[UI] = EndValue;
3329     }
3330   }
3331
3332   // An external user of the penultimate value needs to see EndValue - Step.
3333   // The simplest way to get this is to recompute it from the constituent SCEVs,
3334   // that is Start + (Step * (CRD - 1)).
3335   for (User *U : OrigPhi->users()) {
3336     auto *UI = cast<Instruction>(U);
3337     if (!OrigLoop->contains(UI)) {
3338       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3339
3340       IRBuilder<> B(MiddleBlock->getTerminator());
3341
3342       // Fast-math-flags propagate from the original induction instruction.
3343       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3344         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3345
3346       Value *CountMinusOne = B.CreateSub(
3347           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3348       Value *CMO =
3349           !II.getStep()->getType()->isIntegerTy()
3350               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3351                              II.getStep()->getType())
3352               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3353       CMO->setName("cast.cmo");
3354
3355       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3356                                     VectorHeader->getTerminator());
3357       Value *Escape =
3358           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3359       Escape->setName("ind.escape");
3360       MissingVals[UI] = Escape;
3361     }
3362   }
3363
3364   for (auto &I : MissingVals) {
3365     PHINode *PHI = cast<PHINode>(I.first);
3366     // One corner case we have to handle is two IVs "chasing" each other,
3367     // that is %IV2 = phi [...], [ %IV1, %latch ]
3368     // In this case, if IV1 has an external use, we need to avoid adding both
3369     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3370     // don't already have an incoming value for the middle block.
3371     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3372       PHI->addIncoming(I.second, MiddleBlock);
3373   }
3374 }
3375
3376 namespace {
3377
3378 struct CSEDenseMapInfo {
3379   static bool canHandle(const Instruction *I) {
3380     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3381            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3382   }
3383
3384   static inline Instruction *getEmptyKey() {
3385     return DenseMapInfo<Instruction *>::getEmptyKey();
3386   }
3387
3388   static inline Instruction *getTombstoneKey() {
3389     return DenseMapInfo<Instruction *>::getTombstoneKey();
3390   }
3391
3392   static unsigned getHashValue(const Instruction *I) {
3393     assert(canHandle(I) && "Unknown instruction!");
3394     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3395                                                            I->value_op_end()));
3396   }
3397
3398   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3399     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3400         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3401       return LHS == RHS;
3402     return LHS->isIdenticalTo(RHS);
3403   }
3404 };
3405
3406 } // end anonymous namespace
3407
3408 /// Perform CSE of induction variable instructions.
3409 static void cse(BasicBlock *BB) {
3410   // Perform simple cse.
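  // E.g. (illustrative): if the block contains two identical address
  // computations
  //   %gep1 = getelementptr inbounds i32, i32* %base, i64 %idx
  //   %gep2 = getelementptr inbounds i32, i32* %base, i64 %idx
  // then all uses of %gep2 are rewritten to %gep1 and %gep2 is erased.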
3411 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3412 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3413 if (!CSEDenseMapInfo::canHandle(&In)) 3414 continue; 3415 3416 // Check if we can replace this instruction with any of the 3417 // visited instructions. 3418 if (Instruction *V = CSEMap.lookup(&In)) { 3419 In.replaceAllUsesWith(V); 3420 In.eraseFromParent(); 3421 continue; 3422 } 3423 3424 CSEMap[&In] = &In; 3425 } 3426 } 3427 3428 InstructionCost 3429 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3430 bool &NeedToScalarize) const { 3431 Function *F = CI->getCalledFunction(); 3432 Type *ScalarRetTy = CI->getType(); 3433 SmallVector<Type *, 4> Tys, ScalarTys; 3434 for (auto &ArgOp : CI->args()) 3435 ScalarTys.push_back(ArgOp->getType()); 3436 3437 // Estimate cost of scalarized vector call. The source operands are assumed 3438 // to be vectors, so we need to extract individual elements from there, 3439 // execute VF scalar calls, and then gather the result into the vector return 3440 // value. 3441 InstructionCost ScalarCallCost = 3442 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3443 if (VF.isScalar()) 3444 return ScalarCallCost; 3445 3446 // Compute corresponding vector type for return value and arguments. 3447 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3448 for (Type *ScalarTy : ScalarTys) 3449 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3450 3451 // Compute costs of unpacking argument values for the scalar calls and 3452 // packing the return values to a vector. 3453 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3454 3455 InstructionCost Cost = 3456 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3457 3458 // If we can't emit a vector call for this function, then the currently found 3459 // cost is the cost we need to return. 3460 NeedToScalarize = true; 3461 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3462 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3463 3464 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3465 return Cost; 3466 3467 // If the corresponding vector cost is cheaper, return its cost. 
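  // A rough worked example (all costs made up): with VF == 4, a scalar call
  // cost of 10 and a scalarization overhead of 12, the scalarized cost
  // computed above is 4 * 10 + 12 == 52; if the target provides a vector
  // library variant whose call cost below is 20, NeedToScalarize is cleared
  // and 20 is returned instead.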
3468 InstructionCost VectorCallCost = 3469 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3470 if (VectorCallCost < Cost) { 3471 NeedToScalarize = false; 3472 Cost = VectorCallCost; 3473 } 3474 return Cost; 3475 } 3476 3477 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3478 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3479 return Elt; 3480 return VectorType::get(Elt, VF); 3481 } 3482 3483 InstructionCost 3484 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3485 ElementCount VF) const { 3486 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3487 assert(ID && "Expected intrinsic call!"); 3488 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3489 FastMathFlags FMF; 3490 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3491 FMF = FPMO->getFastMathFlags(); 3492 3493 SmallVector<const Value *> Arguments(CI->args()); 3494 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3495 SmallVector<Type *> ParamTys; 3496 std::transform(FTy->param_begin(), FTy->param_end(), 3497 std::back_inserter(ParamTys), 3498 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3499 3500 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3501 dyn_cast<IntrinsicInst>(CI)); 3502 return TTI.getIntrinsicInstrCost(CostAttrs, 3503 TargetTransformInfo::TCK_RecipThroughput); 3504 } 3505 3506 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3507 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3508 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3509 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3510 } 3511 3512 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3513 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3514 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3515 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3516 } 3517 3518 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3519 // For every instruction `I` in MinBWs, truncate the operands, create a 3520 // truncated version of `I` and reextend its result. InstCombine runs 3521 // later and will remove any ext/trunc pairs. 3522 SmallPtrSet<Value *, 4> Erased; 3523 for (const auto &KV : Cost->getMinimalBitwidths()) { 3524 // If the value wasn't vectorized, we must maintain the original scalar 3525 // type. The absence of the value from State indicates that it 3526 // wasn't vectorized. 3527 // FIXME: Should not rely on getVPValue at this point. 3528 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3529 if (!State.hasAnyVectorValue(Def)) 3530 continue; 3531 for (unsigned Part = 0; Part < UF; ++Part) { 3532 Value *I = State.get(Def, Part); 3533 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3534 continue; 3535 Type *OriginalTy = I->getType(); 3536 Type *ScalarTruncatedTy = 3537 IntegerType::get(OriginalTy->getContext(), KV.second); 3538 auto *TruncatedTy = VectorType::get( 3539 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3540 if (TruncatedTy == OriginalTy) 3541 continue; 3542 3543 IRBuilder<> B(cast<Instruction>(I)); 3544 auto ShrinkOperand = [&](Value *V) -> Value * { 3545 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3546 if (ZI->getSrcTy() == TruncatedTy) 3547 return ZI->getOperand(0); 3548 return B.CreateZExtOrTrunc(V, TruncatedTy); 3549 }; 3550 3551 // The actual instruction modification depends on the instruction type, 3552 // unfortunately. 
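      // E.g. (a sketch): if MinBWs records that an i32 add only needs 8 bits,
      // both operands are truncated (or taken from existing zexts) to
      // <VF x i8>, the add is re-created on <VF x i8>, and the result is
      // zero-extended back to <VF x i32> at the bottom of this loop body.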
3553 Value *NewI = nullptr; 3554 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3555 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3556 ShrinkOperand(BO->getOperand(1))); 3557 3558 // Any wrapping introduced by shrinking this operation shouldn't be 3559 // considered undefined behavior. So, we can't unconditionally copy 3560 // arithmetic wrapping flags to NewI. 3561 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3562 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3563 NewI = 3564 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3565 ShrinkOperand(CI->getOperand(1))); 3566 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3567 NewI = B.CreateSelect(SI->getCondition(), 3568 ShrinkOperand(SI->getTrueValue()), 3569 ShrinkOperand(SI->getFalseValue())); 3570 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3571 switch (CI->getOpcode()) { 3572 default: 3573 llvm_unreachable("Unhandled cast!"); 3574 case Instruction::Trunc: 3575 NewI = ShrinkOperand(CI->getOperand(0)); 3576 break; 3577 case Instruction::SExt: 3578 NewI = B.CreateSExtOrTrunc( 3579 CI->getOperand(0), 3580 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3581 break; 3582 case Instruction::ZExt: 3583 NewI = B.CreateZExtOrTrunc( 3584 CI->getOperand(0), 3585 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3586 break; 3587 } 3588 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3589 auto Elements0 = 3590 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3591 auto *O0 = B.CreateZExtOrTrunc( 3592 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3593 auto Elements1 = 3594 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3595 auto *O1 = B.CreateZExtOrTrunc( 3596 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3597 3598 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3599 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3600 // Don't do anything with the operands, just extend the result. 3601 continue; 3602 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3603 auto Elements = 3604 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3605 auto *O0 = B.CreateZExtOrTrunc( 3606 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3607 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3608 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3609 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3610 auto Elements = 3611 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3612 auto *O0 = B.CreateZExtOrTrunc( 3613 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3614 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3615 } else { 3616 // If we don't know what to do, be conservative and don't do anything. 3617 continue; 3618 } 3619 3620 // Lastly, extend the result. 3621 NewI->takeName(cast<Instruction>(I)); 3622 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3623 I->replaceAllUsesWith(Res); 3624 cast<Instruction>(I)->eraseFromParent(); 3625 Erased.insert(I); 3626 State.reset(Def, Res, Part); 3627 } 3628 } 3629 3630 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3631 for (const auto &KV : Cost->getMinimalBitwidths()) { 3632 // If the value wasn't vectorized, we must maintain the original scalar 3633 // type. The absence of the value from State indicates that it 3634 // wasn't vectorized. 3635 // FIXME: Should not rely on getVPValue at this point. 
3636     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3637     if (!State.hasAnyVectorValue(Def))
3638       continue;
3639     for (unsigned Part = 0; Part < UF; ++Part) {
3640       Value *I = State.get(Def, Part);
3641       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3642       if (Inst && Inst->use_empty()) {
3643         Value *NewI = Inst->getOperand(0);
3644         Inst->eraseFromParent();
3645         State.reset(Def, NewI, Part);
3646       }
3647     }
3648   }
3649 }
3650
3651 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3652   // Insert truncates and extends for any truncated instructions as hints to
3653   // InstCombine.
3654   if (VF.isVector())
3655     truncateToMinimalBitwidths(State);
3656
3657   // Fix widened non-induction PHIs by setting up the PHI operands.
3658   if (OrigPHIsToFix.size()) {
3659     assert(EnableVPlanNativePath &&
3660            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3661     fixNonInductionPHIs(State);
3662   }
3663
3664   // At this point every instruction in the original loop is widened to a
3665   // vector form. Now we need to fix the recurrences in the loop. These PHI
3666   // nodes are currently empty because we did not want to introduce cycles.
3667   // This is the second stage of vectorizing recurrences.
3668   fixCrossIterationPHIs(State);
3669
3670   // Forget the original basic block.
3671   PSE.getSE()->forgetLoop(OrigLoop);
3672
3673   Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
3674   // If we inserted an edge from the middle block to the unique exit block,
3675   // update uses outside the loop (phis) to account for the newly inserted
3676   // edge.
3677   if (!Cost->requiresScalarEpilogue(VF)) {
3678     // Fix-up external users of the induction variables.
3679     for (auto &Entry : Legal->getInductionVars())
3680       fixupIVUsers(Entry.first, Entry.second,
3681                    getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
3682                    IVEndValues[Entry.first], LoopMiddleBlock,
3683                    VectorLoop->getHeader());
3684
3685     fixLCSSAPHIs(State);
3686   }
3687
3688   for (Instruction *PI : PredicatedInstructions)
3689     sinkScalarOperands(&*PI);
3690
3691   // Remove redundant induction instructions.
3692   cse(VectorLoop->getHeader());
3693
3694   // Set/update profile weights for the vector and remainder loops as original
3695   // loop iterations are now distributed among them. Note that the original loop
3696   // represented by LoopScalarBody becomes the remainder loop after vectorization.
3697   //
3698   // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3699   // end up with a slightly less accurate result, but that should be OK since
3700   // the profile is not inherently precise anyway. Note also that a possible
3701   // bypass of the vector code caused by legality checks is ignored,
3702   // optimistically assigning all the weight to the vector loop.
3703   //
3704   // For scalable vectorization we can't know at compile time how many iterations
3705   // of the loop are handled in one vector iteration, so instead assume a pessimistic
3706   // vscale of '1'.
3707   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3708                                LI->getLoopFor(LoopScalarBody),
3709                                VF.getKnownMinValue() * UF);
3710 }
3711
3712 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3713   // In order to support recurrences we need to be able to vectorize Phi nodes.
3714   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3715   // stage #2: We now need to fix the recurrences by adding incoming edges to
3716   // the currently empty PHI nodes.
At this point every instruction in the 3717 // original loop is widened to a vector form so we can use them to construct 3718 // the incoming edges. 3719 VPBasicBlock *Header = 3720 State.Plan->getVectorLoopRegion()->getEntryBasicBlock(); 3721 for (VPRecipeBase &R : Header->phis()) { 3722 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3723 fixReduction(ReductionPhi, State); 3724 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3725 fixFirstOrderRecurrence(FOR, State); 3726 } 3727 } 3728 3729 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3730 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 3731 // This is the second phase of vectorizing first-order recurrences. An 3732 // overview of the transformation is described below. Suppose we have the 3733 // following loop. 3734 // 3735 // for (int i = 0; i < n; ++i) 3736 // b[i] = a[i] - a[i - 1]; 3737 // 3738 // There is a first-order recurrence on "a". For this loop, the shorthand 3739 // scalar IR looks like: 3740 // 3741 // scalar.ph: 3742 // s_init = a[-1] 3743 // br scalar.body 3744 // 3745 // scalar.body: 3746 // i = phi [0, scalar.ph], [i+1, scalar.body] 3747 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3748 // s2 = a[i] 3749 // b[i] = s2 - s1 3750 // br cond, scalar.body, ... 3751 // 3752 // In this example, s1 is a recurrence because it's value depends on the 3753 // previous iteration. In the first phase of vectorization, we created a 3754 // vector phi v1 for s1. We now complete the vectorization and produce the 3755 // shorthand vector IR shown below (for VF = 4, UF = 1). 3756 // 3757 // vector.ph: 3758 // v_init = vector(..., ..., ..., a[-1]) 3759 // br vector.body 3760 // 3761 // vector.body 3762 // i = phi [0, vector.ph], [i+4, vector.body] 3763 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3764 // v2 = a[i, i+1, i+2, i+3]; 3765 // v3 = vector(v1(3), v2(0, 1, 2)) 3766 // b[i, i+1, i+2, i+3] = v2 - v3 3767 // br cond, vector.body, middle.block 3768 // 3769 // middle.block: 3770 // x = v2(3) 3771 // br scalar.ph 3772 // 3773 // scalar.ph: 3774 // s_init = phi [x, middle.block], [a[-1], otherwise] 3775 // br scalar.body 3776 // 3777 // After execution completes the vector loop, we extract the next value of 3778 // the recurrence (x) to use as the initial value in the scalar loop. 3779 3780 // Extract the last vector element in the middle block. This will be the 3781 // initial value for the recurrence when jumping to the scalar loop. 3782 VPValue *PreviousDef = PhiR->getBackedgeValue(); 3783 Value *Incoming = State.get(PreviousDef, UF - 1); 3784 auto *ExtractForScalar = Incoming; 3785 auto *IdxTy = Builder.getInt32Ty(); 3786 if (VF.isVector()) { 3787 auto *One = ConstantInt::get(IdxTy, 1); 3788 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3789 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3790 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 3791 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 3792 "vector.recur.extract"); 3793 } 3794 // Extract the second last element in the middle block if the 3795 // Phi is used outside the loop. We need to extract the phi itself 3796 // and not the last element (the phi update in the current iteration). This 3797 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3798 // when the scalar loop is not run at all. 
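  // Continuing the VF = 4, UF = 1 example above (a sketch): lane 3 of v2 was
  // extracted above as the resume value for the scalar loop, while the extract
  // below takes lane 2 of v2 - the value the original phi s1 would hold in the
  // last iteration covered by the vector loop - for phis that use the
  // recurrence outside the loop.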
3799   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3800   if (VF.isVector()) {
3801     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3802     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3803     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3804         Incoming, Idx, "vector.recur.extract.for.phi");
3805   } else if (UF > 1)
3806     // When the loop is unrolled without vectorizing, initialize
3807     // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
3808     // value of `Incoming`. This is analogous to the vectorized case above:
3809     // extracting the second last element when VF > 1.
3810     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3811
3812   // Fix the initial value of the original recurrence in the scalar loop.
3813   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3814   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3815   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3816   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3817   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3818     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3819     Start->addIncoming(Incoming, BB);
3820   }
3821
3822   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3823   Phi->setName("scalar.recur");
3824
3825   // Finally, fix users of the recurrence outside the loop. The users will need
3826   // either the last value of the scalar recurrence or the last value of the
3827   // vector recurrence we extracted in the middle block. Since the loop is in
3828   // LCSSA form, we just need to find all the phi nodes for the original scalar
3829   // recurrence in the exit block, and then add an edge for the middle block.
3830   // Note that LCSSA does not imply single entry when the original scalar loop
3831   // had multiple exiting edges (as we always run the last iteration in the
3832   // scalar epilogue); in that case, there is no edge from middle to exit and
3833   // thus no phis which need to be updated.
3834   if (!Cost->requiresScalarEpilogue(VF))
3835     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3836       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3837         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3838 }
3839
3840 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3841                                        VPTransformState &State) {
3842   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
3843   // Get its reduction variable descriptor.
3844   assert(Legal->isReductionVariable(OrigPhi) &&
3845          "Unable to find the reduction variable");
3846   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3847
3848   RecurKind RK = RdxDesc.getRecurrenceKind();
3849   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3850   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3851   setDebugLocFromInst(ReductionStartValue);
3852
3853   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3854   // This is the vector-clone of the value that leaves the loop.
3855   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3856
3857   // Wrap flags are in general invalid after vectorization, clear them.
3858   clearReductionWrapFlags(RdxDesc, State);
3859
3860   // Before each round, move the insertion point right between
3861   // the PHIs and the values we are going to write.
3862   // This allows us to write both PHINodes and the extractelement
3863   // instructions.
3864 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3865 3866 setDebugLocFromInst(LoopExitInst); 3867 3868 Type *PhiTy = OrigPhi->getType(); 3869 BasicBlock *VectorLoopLatch = 3870 LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 3871 // If tail is folded by masking, the vector value to leave the loop should be 3872 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3873 // instead of the former. For an inloop reduction the reduction will already 3874 // be predicated, and does not need to be handled here. 3875 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 3876 for (unsigned Part = 0; Part < UF; ++Part) { 3877 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 3878 Value *Sel = nullptr; 3879 for (User *U : VecLoopExitInst->users()) { 3880 if (isa<SelectInst>(U)) { 3881 assert(!Sel && "Reduction exit feeding two selects"); 3882 Sel = U; 3883 } else 3884 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3885 } 3886 assert(Sel && "Reduction exit feeds no select"); 3887 State.reset(LoopExitInstDef, Sel, Part); 3888 3889 // If the target can create a predicated operator for the reduction at no 3890 // extra cost in the loop (for example a predicated vadd), it can be 3891 // cheaper for the select to remain in the loop than be sunk out of it, 3892 // and so use the select value for the phi instead of the old 3893 // LoopExitValue. 3894 if (PreferPredicatedReductionSelect || 3895 TTI->preferPredicatedReductionSelect( 3896 RdxDesc.getOpcode(), PhiTy, 3897 TargetTransformInfo::ReductionFlags())) { 3898 auto *VecRdxPhi = 3899 cast<PHINode>(State.get(PhiR, Part)); 3900 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3901 } 3902 } 3903 } 3904 3905 // If the vector reduction can be performed in a smaller type, we truncate 3906 // then extend the loop exit value to enable InstCombine to evaluate the 3907 // entire expression in the smaller type. 3908 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3909 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3910 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3911 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3912 VectorParts RdxParts(UF); 3913 for (unsigned Part = 0; Part < UF; ++Part) { 3914 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3915 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3916 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3917 : Builder.CreateZExt(Trunc, VecTy); 3918 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3919 if (U != Trunc) { 3920 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3921 RdxParts[Part] = Extnd; 3922 } 3923 } 3924 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3925 for (unsigned Part = 0; Part < UF; ++Part) { 3926 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3927 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3928 } 3929 } 3930 3931 // Reduce all of the unrolled parts into a single vector. 3932 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3933 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3934 3935 // The middle block terminator has already been assigned a DebugLoc here (the 3936 // OrigLoop's single latch terminator). 
We want the whole middle block to 3937 // appear to execute on this line because: (a) it is all compiler generated, 3938 // (b) these instructions are always executed after evaluating the latch 3939 // conditional branch, and (c) other passes may add new predecessors which 3940 // terminate on this line. This is the easiest way to ensure we don't 3941 // accidentally cause an extra step back into the loop while debugging. 3942 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 3943 if (PhiR->isOrdered()) 3944 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 3945 else { 3946 // Floating-point operations should have some FMF to enable the reduction. 3947 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 3948 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 3949 for (unsigned Part = 1; Part < UF; ++Part) { 3950 Value *RdxPart = State.get(LoopExitInstDef, Part); 3951 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 3952 ReducedPartRdx = Builder.CreateBinOp( 3953 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 3954 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 3955 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 3956 ReducedPartRdx, RdxPart); 3957 else 3958 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 3959 } 3960 } 3961 3962 // Create the reduction after the loop. Note that inloop reductions create the 3963 // target reduction in the loop using a Reduction recipe. 3964 if (VF.isVector() && !PhiR->isInLoop()) { 3965 ReducedPartRdx = 3966 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 3967 // If the reduction can be performed in a smaller type, we need to extend 3968 // the reduction to the wider type before we branch to the original loop. 3969 if (PhiTy != RdxDesc.getRecurrenceType()) 3970 ReducedPartRdx = RdxDesc.isSigned() 3971 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 3972 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 3973 } 3974 3975 PHINode *ResumePhi = 3976 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue()); 3977 3978 // Create a phi node that merges control-flow from the backedge-taken check 3979 // block and the middle block. 3980 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 3981 LoopScalarPreHeader->getTerminator()); 3982 3983 // If we are fixing reductions in the epilogue loop then we should already 3984 // have created a bc.merge.rdx Phi after the main vector body. Ensure that 3985 // we carry over the incoming values correctly. 3986 for (auto *Incoming : predecessors(LoopScalarPreHeader)) { 3987 if (Incoming == LoopMiddleBlock) 3988 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming); 3989 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming)) 3990 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming), 3991 Incoming); 3992 else 3993 BCBlockPhi->addIncoming(ReductionStartValue, Incoming); 3994 } 3995 3996 // Set the resume value for this reduction 3997 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi}); 3998 3999 // Now, we need to fix the users of the reduction variable 4000 // inside and outside of the scalar remainder loop. 4001 4002 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4003 // in the exit blocks. See comment on analogous loop in 4004 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
4005 if (!Cost->requiresScalarEpilogue(VF)) 4006 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4007 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4008 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4009 4010 // Fix the scalar loop reduction variable with the incoming reduction sum 4011 // from the vector body and from the backedge value. 4012 int IncomingEdgeBlockIdx = 4013 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4014 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4015 // Pick the other block. 4016 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4017 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4018 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4019 } 4020 4021 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4022 VPTransformState &State) { 4023 RecurKind RK = RdxDesc.getRecurrenceKind(); 4024 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4025 return; 4026 4027 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4028 assert(LoopExitInstr && "null loop exit instruction"); 4029 SmallVector<Instruction *, 8> Worklist; 4030 SmallPtrSet<Instruction *, 8> Visited; 4031 Worklist.push_back(LoopExitInstr); 4032 Visited.insert(LoopExitInstr); 4033 4034 while (!Worklist.empty()) { 4035 Instruction *Cur = Worklist.pop_back_val(); 4036 if (isa<OverflowingBinaryOperator>(Cur)) 4037 for (unsigned Part = 0; Part < UF; ++Part) { 4038 // FIXME: Should not rely on getVPValue at this point. 4039 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4040 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4041 } 4042 4043 for (User *U : Cur->users()) { 4044 Instruction *UI = cast<Instruction>(U); 4045 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4046 Visited.insert(UI).second) 4047 Worklist.push_back(UI); 4048 } 4049 } 4050 } 4051 4052 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4053 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4054 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4055 // Some phis were already hand updated by the reduction and recurrence 4056 // code above, leave them alone. 4057 continue; 4058 4059 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4060 // Non-instruction incoming values will have only one value. 4061 4062 VPLane Lane = VPLane::getFirstLane(); 4063 if (isa<Instruction>(IncomingValue) && 4064 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4065 VF)) 4066 Lane = VPLane::getLastLaneForVF(VF); 4067 4068 // Can be a loop invariant incoming value or the last scalar value to be 4069 // extracted from the vectorized loop. 4070 // FIXME: Should not rely on getVPValue at this point. 4071 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4072 Value *lastIncomingValue = 4073 OrigLoop->isLoopInvariant(IncomingValue) 4074 ? IncomingValue 4075 : State.get(State.Plan->getVPValue(IncomingValue, true), 4076 VPIteration(UF - 1, Lane)); 4077 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4078 } 4079 } 4080 4081 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4082 // The basic block and loop containing the predicated instruction. 4083 auto *PredBB = PredInst->getParent(); 4084 auto *VectorLoop = LI->getLoopFor(PredBB); 4085 4086 // Initialize a worklist with the operands of the predicated instruction. 
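  // For example (a sketch): if the predicated block contains a scalarized
  //   store i32 %val, i32* %gep
  // and %gep is a getelementptr that still sits in the vector loop body but is
  // only used by that store, the worklist loop below moves the getelementptr
  // into the predicated block and then re-examines its own operands in turn.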
4087 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4088
4089 // Holds instructions that we need to analyze again. An instruction may be
4090 // reanalyzed if we don't yet know if we can sink it or not.
4091 SmallVector<Instruction *, 8> InstsToReanalyze;
4092
4093 // Returns true if a given use occurs in the predicated block. Phi nodes use
4094 // their operands in their corresponding predecessor blocks.
4095 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4096 auto *I = cast<Instruction>(U.getUser());
4097 BasicBlock *BB = I->getParent();
4098 if (auto *Phi = dyn_cast<PHINode>(I))
4099 BB = Phi->getIncomingBlock(
4100 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4101 return BB == PredBB;
4102 };
4103
4104 // Iteratively sink the scalarized operands of the predicated instruction
4105 // into the block we created for it. When an instruction is sunk, its
4106 // operands are then added to the worklist. The algorithm ends when one pass
4107 // through the worklist fails to sink a single instruction.
4108 bool Changed;
4109 do {
4110 // Add the instructions that need to be reanalyzed to the worklist, and
4111 // reset the changed indicator.
4112 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4113 InstsToReanalyze.clear();
4114 Changed = false;
4115
4116 while (!Worklist.empty()) {
4117 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4118
4119 // We can't sink an instruction if it is a phi node, is not in the loop,
4120 // or may have side effects.
4121 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4122 I->mayHaveSideEffects())
4123 continue;
4124
4125 // If the instruction is already in PredBB, check if we can sink its
4126 // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4127 // sinking the scalar instruction I, hence it appears in PredBB; but it
4128 // may have failed to sink I's operands (recursively), which we try
4129 // (again) here.
4130 if (I->getParent() == PredBB) {
4131 Worklist.insert(I->op_begin(), I->op_end());
4132 continue;
4133 }
4134
4135 // It's legal to sink the instruction if all its uses occur in the
4136 // predicated block. Otherwise, there's nothing to do yet, and we may
4137 // need to reanalyze the instruction.
4138 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4139 InstsToReanalyze.push_back(I);
4140 continue;
4141 }
4142
4143 // Move the instruction to the beginning of the predicated block, and add
4144 // its operands to the worklist.
4145 I->moveBefore(&*PredBB->getFirstInsertionPt());
4146 Worklist.insert(I->op_begin(), I->op_end());
4147
4148 // The sinking may have enabled other instructions to be sunk, so we will
4149 // need to iterate.
4150 Changed = true;
4151 }
4152 } while (Changed);
4153 }
4154
4155 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4156 for (PHINode *OrigPhi : OrigPHIsToFix) {
4157 VPWidenPHIRecipe *VPPhi =
4158 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4159 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4160 // Make sure the builder has a valid insert point.
4161 Builder.SetInsertPoint(NewPhi); 4162 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4163 VPValue *Inc = VPPhi->getIncomingValue(i); 4164 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4165 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4166 } 4167 } 4168 } 4169 4170 bool InnerLoopVectorizer::useOrderedReductions( 4171 const RecurrenceDescriptor &RdxDesc) { 4172 return Cost->useOrderedReductions(RdxDesc); 4173 } 4174 4175 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4176 VPWidenPHIRecipe *PhiR, 4177 VPTransformState &State) { 4178 assert(EnableVPlanNativePath && 4179 "Non-native vplans are not expected to have VPWidenPHIRecipes."); 4180 // Currently we enter here in the VPlan-native path for non-induction 4181 // PHIs where all control flow is uniform. We simply widen these PHIs. 4182 // Create a vector phi with no operands - the vector phi operands will be 4183 // set at the end of vector code generation. 4184 Type *VecTy = (State.VF.isScalar()) 4185 ? PN->getType() 4186 : VectorType::get(PN->getType(), State.VF); 4187 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4188 State.set(PhiR, VecPhi, 0); 4189 OrigPHIsToFix.push_back(cast<PHINode>(PN)); 4190 } 4191 4192 /// A helper function for checking whether an integer division-related 4193 /// instruction may divide by zero (in which case it must be predicated if 4194 /// executed conditionally in the scalar code). 4195 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4196 /// Non-zero divisors that are non compile-time constants will not be 4197 /// converted into multiplication, so we will still end up scalarizing 4198 /// the division, but can do so w/o predication. 4199 static bool mayDivideByZero(Instruction &I) { 4200 assert((I.getOpcode() == Instruction::UDiv || 4201 I.getOpcode() == Instruction::SDiv || 4202 I.getOpcode() == Instruction::URem || 4203 I.getOpcode() == Instruction::SRem) && 4204 "Unexpected instruction"); 4205 Value *Divisor = I.getOperand(1); 4206 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4207 return !CInt || CInt->isZero(); 4208 } 4209 4210 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4211 VPUser &ArgOperands, 4212 VPTransformState &State) { 4213 assert(!isa<DbgInfoIntrinsic>(I) && 4214 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4215 setDebugLocFromInst(&I); 4216 4217 Module *M = I.getParent()->getParent()->getParent(); 4218 auto *CI = cast<CallInst>(&I); 4219 4220 SmallVector<Type *, 4> Tys; 4221 for (Value *ArgOperand : CI->args()) 4222 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4223 4224 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4225 4226 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4227 // version of the instruction. 4228 // Is it beneficial to perform intrinsic call compared to lib call? 4229 bool NeedToScalarize = false; 4230 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4231 InstructionCost IntrinsicCost = ID ? 
Cost->getVectorIntrinsicCost(CI, VF) : 0; 4232 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4233 assert((UseVectorIntrinsic || !NeedToScalarize) && 4234 "Instruction should be scalarized elsewhere."); 4235 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4236 "Either the intrinsic cost or vector call cost must be valid"); 4237 4238 for (unsigned Part = 0; Part < UF; ++Part) { 4239 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4240 SmallVector<Value *, 4> Args; 4241 for (auto &I : enumerate(ArgOperands.operands())) { 4242 // Some intrinsics have a scalar argument - don't replace it with a 4243 // vector. 4244 Value *Arg; 4245 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4246 Arg = State.get(I.value(), Part); 4247 else { 4248 Arg = State.get(I.value(), VPIteration(0, 0)); 4249 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4250 TysForDecl.push_back(Arg->getType()); 4251 } 4252 Args.push_back(Arg); 4253 } 4254 4255 Function *VectorF; 4256 if (UseVectorIntrinsic) { 4257 // Use vector version of the intrinsic. 4258 if (VF.isVector()) 4259 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4260 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4261 assert(VectorF && "Can't retrieve vector intrinsic."); 4262 } else { 4263 // Use vector version of the function call. 4264 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4265 #ifndef NDEBUG 4266 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4267 "Can't create vector function."); 4268 #endif 4269 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4270 } 4271 SmallVector<OperandBundleDef, 1> OpBundles; 4272 CI->getOperandBundlesAsDefs(OpBundles); 4273 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4274 4275 if (isa<FPMathOperator>(V)) 4276 V->copyFastMathFlags(CI); 4277 4278 State.set(Def, V, Part); 4279 addMetadata(V, &I); 4280 } 4281 } 4282 4283 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4284 // We should not collect Scalars more than once per VF. Right now, this 4285 // function is called from collectUniformsAndScalars(), which already does 4286 // this check. Collecting Scalars for VF=1 does not make any sense. 4287 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4288 "This function should not be visited twice for the same VF"); 4289 4290 // This avoids any chances of creating a REPLICATE recipe during planning 4291 // since that would result in generation of scalarized code during execution, 4292 // which is not supported for scalable vectors. 4293 if (VF.isScalable()) { 4294 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4295 return; 4296 } 4297 4298 SmallSetVector<Instruction *, 8> Worklist; 4299 4300 // These sets are used to seed the analysis with pointers used by memory 4301 // accesses that will remain scalar. 4302 SmallSetVector<Instruction *, 8> ScalarPtrs; 4303 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4304 auto *Latch = TheLoop->getLoopLatch(); 4305 4306 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4307 // The pointer operands of loads and stores will be scalar as long as the 4308 // memory access is not a gather or scatter operation. The value operand of a 4309 // store will remain scalar if the store is scalarized. 
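// Illustrative example (hypothetical loop, added for clarity; not from the
// original source): in
//   for (i = 0; i < n; ++i) { s = B[i]; A[i] = s; }
// the address computations feeding the consecutive load of B[i] and the
// consecutive store to A[i] are only needed as scalar addresses
// (CM_Widen/CM_Widen_Reverse/CM_Interleave), so their pointer uses are scalar.
// If a load/store instead becomes a gather/scatter (CM_GatherScatter), every
// lane needs its own pointer and the use is not scalar. Likewise, the value
// operand of a store stays scalar only when the store itself is scalarized
// (CM_Scalarize), as checked by the helper below.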
4310 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4311 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4312 assert(WideningDecision != CM_Unknown &&
4313 "Widening decision should be ready at this moment");
4314 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4315 if (Ptr == Store->getValueOperand())
4316 return WideningDecision == CM_Scalarize;
4317 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4318 "Ptr is neither a value nor a pointer operand");
4319 return WideningDecision != CM_GatherScatter;
4320 };
4321
4322 // A helper that returns true if the given value is a bitcast or
4323 // getelementptr instruction contained in the loop.
4324 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4325 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4326 isa<GetElementPtrInst>(V)) &&
4327 !TheLoop->isLoopInvariant(V);
4328 };
4329
4330 // A helper that evaluates a memory access's use of a pointer. If the use will
4331 // be a scalar use and the pointer is only used by memory accesses, we place
4332 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4333 // PossibleNonScalarPtrs.
4334 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4335 // We only care about bitcast and getelementptr instructions contained in
4336 // the loop.
4337 if (!isLoopVaryingBitCastOrGEP(Ptr))
4338 return;
4339
4340 // If the pointer has already been identified as scalar (e.g., if it was
4341 // also identified as uniform), there's nothing to do.
4342 auto *I = cast<Instruction>(Ptr);
4343 if (Worklist.count(I))
4344 return;
4345
4346 // If the use of the pointer will be a scalar use, and all users of the
4347 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4348 // place the pointer in PossibleNonScalarPtrs.
4349 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4350 return isa<LoadInst>(U) || isa<StoreInst>(U);
4351 }))
4352 ScalarPtrs.insert(I);
4353 else
4354 PossibleNonScalarPtrs.insert(I);
4355 };
4356
4357 // We seed the scalars analysis with two classes of instructions: (1)
4358 // instructions marked uniform-after-vectorization and (2) bitcast,
4359 // getelementptr and (pointer) phi instructions used by memory accesses
4360 // requiring a scalar use.
4361 //
4362 // (1) Add to the worklist all instructions that have been identified as
4363 // uniform-after-vectorization.
4364 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4365
4366 // (2) Add to the worklist all bitcast and getelementptr instructions used by
4367 // memory accesses requiring a scalar use. The pointer operands of loads and
4368 // stores will be scalar as long as the memory access is not a gather or
4369 // scatter operation. The value operand of a store will remain scalar if the
4370 // store is scalarized.
4371 for (auto *BB : TheLoop->blocks())
4372 for (auto &I : *BB) {
4373 if (auto *Load = dyn_cast<LoadInst>(&I)) {
4374 evaluatePtrUse(Load, Load->getPointerOperand());
4375 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4376 evaluatePtrUse(Store, Store->getPointerOperand());
4377 evaluatePtrUse(Store, Store->getValueOperand());
4378 }
4379 }
4380 for (auto *I : ScalarPtrs)
4381 if (!PossibleNonScalarPtrs.count(I)) {
4382 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4383 Worklist.insert(I);
4384 }
4385
4386 // Insert the forced scalars.
4387 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4388 // induction variable when the PHI user is scalarized. 4389 auto ForcedScalar = ForcedScalars.find(VF); 4390 if (ForcedScalar != ForcedScalars.end()) 4391 for (auto *I : ForcedScalar->second) 4392 Worklist.insert(I); 4393 4394 // Expand the worklist by looking through any bitcasts and getelementptr 4395 // instructions we've already identified as scalar. This is similar to the 4396 // expansion step in collectLoopUniforms(); however, here we're only 4397 // expanding to include additional bitcasts and getelementptr instructions. 4398 unsigned Idx = 0; 4399 while (Idx != Worklist.size()) { 4400 Instruction *Dst = Worklist[Idx++]; 4401 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4402 continue; 4403 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4404 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4405 auto *J = cast<Instruction>(U); 4406 return !TheLoop->contains(J) || Worklist.count(J) || 4407 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4408 isScalarUse(J, Src)); 4409 })) { 4410 Worklist.insert(Src); 4411 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4412 } 4413 } 4414 4415 // An induction variable will remain scalar if all users of the induction 4416 // variable and induction variable update remain scalar. 4417 for (auto &Induction : Legal->getInductionVars()) { 4418 auto *Ind = Induction.first; 4419 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4420 4421 // If tail-folding is applied, the primary induction variable will be used 4422 // to feed a vector compare. 4423 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4424 continue; 4425 4426 // Returns true if \p Indvar is a pointer induction that is used directly by 4427 // load/store instruction \p I. 4428 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4429 Instruction *I) { 4430 return Induction.second.getKind() == 4431 InductionDescriptor::IK_PtrInduction && 4432 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4433 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4434 }; 4435 4436 // Determine if all users of the induction variable are scalar after 4437 // vectorization. 4438 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4439 auto *I = cast<Instruction>(U); 4440 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4441 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4442 }); 4443 if (!ScalarInd) 4444 continue; 4445 4446 // Determine if all users of the induction variable update instruction are 4447 // scalar after vectorization. 4448 auto ScalarIndUpdate = 4449 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4450 auto *I = cast<Instruction>(U); 4451 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4452 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4453 }); 4454 if (!ScalarIndUpdate) 4455 continue; 4456 4457 // The induction variable and its update instruction will remain scalar. 
4458 Worklist.insert(Ind); 4459 Worklist.insert(IndUpdate); 4460 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4461 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4462 << "\n"); 4463 } 4464 4465 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4466 } 4467 4468 bool LoopVectorizationCostModel::isScalarWithPredication( 4469 Instruction *I, ElementCount VF) const { 4470 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4471 return false; 4472 switch(I->getOpcode()) { 4473 default: 4474 break; 4475 case Instruction::Load: 4476 case Instruction::Store: { 4477 if (!Legal->isMaskRequired(I)) 4478 return false; 4479 auto *Ptr = getLoadStorePointerOperand(I); 4480 auto *Ty = getLoadStoreType(I); 4481 Type *VTy = Ty; 4482 if (VF.isVector()) 4483 VTy = VectorType::get(Ty, VF); 4484 const Align Alignment = getLoadStoreAlignment(I); 4485 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4486 TTI.isLegalMaskedGather(VTy, Alignment)) 4487 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4488 TTI.isLegalMaskedScatter(VTy, Alignment)); 4489 } 4490 case Instruction::UDiv: 4491 case Instruction::SDiv: 4492 case Instruction::SRem: 4493 case Instruction::URem: 4494 return mayDivideByZero(*I); 4495 } 4496 return false; 4497 } 4498 4499 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4500 Instruction *I, ElementCount VF) { 4501 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4502 assert(getWideningDecision(I, VF) == CM_Unknown && 4503 "Decision should not be set yet."); 4504 auto *Group = getInterleavedAccessGroup(I); 4505 assert(Group && "Must have a group."); 4506 4507 // If the instruction's allocated size doesn't equal it's type size, it 4508 // requires padding and will be scalarized. 4509 auto &DL = I->getModule()->getDataLayout(); 4510 auto *ScalarTy = getLoadStoreType(I); 4511 if (hasIrregularType(ScalarTy, DL)) 4512 return false; 4513 4514 // If the group involves a non-integral pointer, we may not be able to 4515 // losslessly cast all values to a common type. 4516 unsigned InterleaveFactor = Group->getFactor(); 4517 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy); 4518 for (unsigned i = 0; i < InterleaveFactor; i++) { 4519 Instruction *Member = Group->getMember(i); 4520 if (!Member) 4521 continue; 4522 auto *MemberTy = getLoadStoreType(Member); 4523 bool MemberNI = DL.isNonIntegralPointerType(MemberTy); 4524 // Don't coerce non-integral pointers to integers or vice versa. 4525 if (MemberNI != ScalarNI) { 4526 // TODO: Consider adding special nullptr value case here 4527 return false; 4528 } else if (MemberNI && ScalarNI && 4529 ScalarTy->getPointerAddressSpace() != 4530 MemberTy->getPointerAddressSpace()) { 4531 return false; 4532 } 4533 } 4534 4535 // Check if masking is required. 4536 // A Group may need masking for one of two reasons: it resides in a block that 4537 // needs predication, or it was decided to use masking to deal with gaps 4538 // (either a gap at the end of a load-access that may result in a speculative 4539 // load, or any gaps in a store-access). 
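// Illustrative example (hypothetical loop, added for clarity; not from the
// original source):
//   for (i = 0; i < n; ++i) { s += A[3 * i] + A[3 * i + 1]; }
// forms a load interleave-group with factor 3 and a gap at A[3 * i + 2]. The
// wide load for the final vector iteration could read past the end of A, so
// the group either requires a scalar epilogue or, if no epilogue is allowed,
// an epilogue mask. A store group with gaps would instead need masking on
// every iteration, since the missing members must not be written.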
4540 bool PredicatedAccessRequiresMasking = 4541 blockNeedsPredicationForAnyReason(I->getParent()) && 4542 Legal->isMaskRequired(I); 4543 bool LoadAccessWithGapsRequiresEpilogMasking = 4544 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4545 !isScalarEpilogueAllowed(); 4546 bool StoreAccessWithGapsRequiresMasking = 4547 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4548 if (!PredicatedAccessRequiresMasking && 4549 !LoadAccessWithGapsRequiresEpilogMasking && 4550 !StoreAccessWithGapsRequiresMasking) 4551 return true; 4552 4553 // If masked interleaving is required, we expect that the user/target had 4554 // enabled it, because otherwise it either wouldn't have been created or 4555 // it should have been invalidated by the CostModel. 4556 assert(useMaskedInterleavedAccesses(TTI) && 4557 "Masked interleave-groups for predicated accesses are not enabled."); 4558 4559 if (Group->isReverse()) 4560 return false; 4561 4562 auto *Ty = getLoadStoreType(I); 4563 const Align Alignment = getLoadStoreAlignment(I); 4564 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4565 : TTI.isLegalMaskedStore(Ty, Alignment); 4566 } 4567 4568 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4569 Instruction *I, ElementCount VF) { 4570 // Get and ensure we have a valid memory instruction. 4571 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4572 4573 auto *Ptr = getLoadStorePointerOperand(I); 4574 auto *ScalarTy = getLoadStoreType(I); 4575 4576 // In order to be widened, the pointer should be consecutive, first of all. 4577 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4578 return false; 4579 4580 // If the instruction is a store located in a predicated block, it will be 4581 // scalarized. 4582 if (isScalarWithPredication(I, VF)) 4583 return false; 4584 4585 // If the instruction's allocated size doesn't equal it's type size, it 4586 // requires padding and will be scalarized. 4587 auto &DL = I->getModule()->getDataLayout(); 4588 if (hasIrregularType(ScalarTy, DL)) 4589 return false; 4590 4591 return true; 4592 } 4593 4594 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4595 // We should not collect Uniforms more than once per VF. Right now, 4596 // this function is called from collectUniformsAndScalars(), which 4597 // already does this check. Collecting Uniforms for VF=1 does not make any 4598 // sense. 4599 4600 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4601 "This function should not be visited twice for the same VF"); 4602 4603 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4604 // not analyze again. Uniforms.count(VF) will return 1. 4605 Uniforms[VF].clear(); 4606 4607 // We now know that the loop is vectorizable! 4608 // Collect instructions inside the loop that will remain uniform after 4609 // vectorization. 4610 4611 // Global values, params and instructions outside of current loop are out of 4612 // scope. 4613 auto isOutOfScope = [&](Value *V) -> bool { 4614 Instruction *I = dyn_cast<Instruction>(V); 4615 return (!I || !TheLoop->contains(I)); 4616 }; 4617 4618 // Worklist containing uniform instructions demanding lane 0. 4619 SetVector<Instruction *> Worklist; 4620 BasicBlock *Latch = TheLoop->getLoopLatch(); 4621 4622 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4623 // that are scalar with predication must not be considered uniform after 4624 // vectorization, because that would create an erroneous replicating region 4625 // where only a single instance out of VF should be formed. 4626 // TODO: optimize such seldom cases if found important, see PR40816. 4627 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4628 if (isOutOfScope(I)) { 4629 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4630 << *I << "\n"); 4631 return; 4632 } 4633 if (isScalarWithPredication(I, VF)) { 4634 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4635 << *I << "\n"); 4636 return; 4637 } 4638 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4639 Worklist.insert(I); 4640 }; 4641 4642 // Start with the conditional branch. If the branch condition is an 4643 // instruction contained in the loop that is only used by the branch, it is 4644 // uniform. 4645 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4646 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4647 addToWorklistIfAllowed(Cmp); 4648 4649 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4650 InstWidening WideningDecision = getWideningDecision(I, VF); 4651 assert(WideningDecision != CM_Unknown && 4652 "Widening decision should be ready at this moment"); 4653 4654 // A uniform memory op is itself uniform. We exclude uniform stores 4655 // here as they demand the last lane, not the first one. 4656 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4657 assert(WideningDecision == CM_Scalarize); 4658 return true; 4659 } 4660 4661 return (WideningDecision == CM_Widen || 4662 WideningDecision == CM_Widen_Reverse || 4663 WideningDecision == CM_Interleave); 4664 }; 4665 4666 4667 // Returns true if Ptr is the pointer operand of a memory access instruction 4668 // I, and I is known to not require scalarization. 4669 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4670 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4671 }; 4672 4673 // Holds a list of values which are known to have at least one uniform use. 4674 // Note that there may be other uses which aren't uniform. A "uniform use" 4675 // here is something which only demands lane 0 of the unrolled iterations; 4676 // it does not imply that all lanes produce the same value (e.g. this is not 4677 // the usual meaning of uniform) 4678 SetVector<Value *> HasUniformUse; 4679 4680 // Scan the loop for instructions which are either a) known to have only 4681 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4682 for (auto *BB : TheLoop->blocks()) 4683 for (auto &I : *BB) { 4684 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4685 switch (II->getIntrinsicID()) { 4686 case Intrinsic::sideeffect: 4687 case Intrinsic::experimental_noalias_scope_decl: 4688 case Intrinsic::assume: 4689 case Intrinsic::lifetime_start: 4690 case Intrinsic::lifetime_end: 4691 if (TheLoop->hasLoopInvariantOperands(&I)) 4692 addToWorklistIfAllowed(&I); 4693 break; 4694 default: 4695 break; 4696 } 4697 } 4698 4699 // ExtractValue instructions must be uniform, because the operands are 4700 // known to be loop-invariant. 
4701 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4702 assert(isOutOfScope(EVI->getAggregateOperand()) && 4703 "Expected aggregate value to be loop invariant"); 4704 addToWorklistIfAllowed(EVI); 4705 continue; 4706 } 4707 4708 // If there's no pointer operand, there's nothing to do. 4709 auto *Ptr = getLoadStorePointerOperand(&I); 4710 if (!Ptr) 4711 continue; 4712 4713 // A uniform memory op is itself uniform. We exclude uniform stores 4714 // here as they demand the last lane, not the first one. 4715 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4716 addToWorklistIfAllowed(&I); 4717 4718 if (isUniformDecision(&I, VF)) { 4719 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4720 HasUniformUse.insert(Ptr); 4721 } 4722 } 4723 4724 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4725 // demanding) users. Since loops are assumed to be in LCSSA form, this 4726 // disallows uses outside the loop as well. 4727 for (auto *V : HasUniformUse) { 4728 if (isOutOfScope(V)) 4729 continue; 4730 auto *I = cast<Instruction>(V); 4731 auto UsersAreMemAccesses = 4732 llvm::all_of(I->users(), [&](User *U) -> bool { 4733 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4734 }); 4735 if (UsersAreMemAccesses) 4736 addToWorklistIfAllowed(I); 4737 } 4738 4739 // Expand Worklist in topological order: whenever a new instruction 4740 // is added , its users should be already inside Worklist. It ensures 4741 // a uniform instruction will only be used by uniform instructions. 4742 unsigned idx = 0; 4743 while (idx != Worklist.size()) { 4744 Instruction *I = Worklist[idx++]; 4745 4746 for (auto OV : I->operand_values()) { 4747 // isOutOfScope operands cannot be uniform instructions. 4748 if (isOutOfScope(OV)) 4749 continue; 4750 // First order recurrence Phi's should typically be considered 4751 // non-uniform. 4752 auto *OP = dyn_cast<PHINode>(OV); 4753 if (OP && Legal->isFirstOrderRecurrence(OP)) 4754 continue; 4755 // If all the users of the operand are uniform, then add the 4756 // operand into the uniform worklist. 4757 auto *OI = cast<Instruction>(OV); 4758 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4759 auto *J = cast<Instruction>(U); 4760 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4761 })) 4762 addToWorklistIfAllowed(OI); 4763 } 4764 } 4765 4766 // For an instruction to be added into Worklist above, all its users inside 4767 // the loop should also be in Worklist. However, this condition cannot be 4768 // true for phi nodes that form a cyclic dependence. We must process phi 4769 // nodes separately. An induction variable will remain uniform if all users 4770 // of the induction variable and induction variable update remain uniform. 4771 // The code below handles both pointer and non-pointer induction variables. 4772 for (auto &Induction : Legal->getInductionVars()) { 4773 auto *Ind = Induction.first; 4774 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4775 4776 // Determine if all users of the induction variable are uniform after 4777 // vectorization. 4778 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4779 auto *I = cast<Instruction>(U); 4780 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4781 isVectorizedMemAccessUse(I, Ind); 4782 }); 4783 if (!UniformInd) 4784 continue; 4785 4786 // Determine if all users of the induction variable update instruction are 4787 // uniform after vectorization. 
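// Rough illustration (hypothetical loops, added for clarity): in
//   for (i = 0; i < n; ++i) A[i] = 42;
// the users of i and of i.next are, roughly, the address computation of the
// consecutive store, the latch compare and the phi itself, all of which are
// uniform, so the induction stays uniform. In
//   for (i = 0; i < n; ++i) A[i] = i;
// the stored value needs a distinct value per lane, so i is not uniform after
// vectorization.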
4788 auto UniformIndUpdate = 4789 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4790 auto *I = cast<Instruction>(U); 4791 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4792 isVectorizedMemAccessUse(I, IndUpdate); 4793 }); 4794 if (!UniformIndUpdate) 4795 continue; 4796 4797 // The induction variable and its update instruction will remain uniform. 4798 addToWorklistIfAllowed(Ind); 4799 addToWorklistIfAllowed(IndUpdate); 4800 } 4801 4802 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4803 } 4804 4805 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4806 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4807 4808 if (Legal->getRuntimePointerChecking()->Need) { 4809 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4810 "runtime pointer checks needed. Enable vectorization of this " 4811 "loop with '#pragma clang loop vectorize(enable)' when " 4812 "compiling with -Os/-Oz", 4813 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4814 return true; 4815 } 4816 4817 if (!PSE.getPredicate().isAlwaysTrue()) { 4818 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4819 "runtime SCEV checks needed. Enable vectorization of this " 4820 "loop with '#pragma clang loop vectorize(enable)' when " 4821 "compiling with -Os/-Oz", 4822 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4823 return true; 4824 } 4825 4826 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4827 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4828 reportVectorizationFailure("Runtime stride check for small trip count", 4829 "runtime stride == 1 checks needed. Enable vectorization of " 4830 "this loop without such check by compiling with -Os/-Oz", 4831 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4832 return true; 4833 } 4834 4835 return false; 4836 } 4837 4838 ElementCount 4839 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4840 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4841 return ElementCount::getScalable(0); 4842 4843 if (Hints->isScalableVectorizationDisabled()) { 4844 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4845 "ScalableVectorizationDisabled", ORE, TheLoop); 4846 return ElementCount::getScalable(0); 4847 } 4848 4849 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4850 4851 auto MaxScalableVF = ElementCount::getScalable( 4852 std::numeric_limits<ElementCount::ScalarTy>::max()); 4853 4854 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4855 // FIXME: While for scalable vectors this is currently sufficient, this should 4856 // be replaced by a more detailed mechanism that filters out specific VFs, 4857 // instead of invalidating vectorization for a whole set of VFs based on the 4858 // MaxVF. 4859 4860 // Disable scalable vectorization if the loop contains unsupported reductions. 4861 if (!canVectorizeReductions(MaxScalableVF)) { 4862 reportVectorizationInfo( 4863 "Scalable vectorization not supported for the reduction " 4864 "operations found in this loop.", 4865 "ScalableVFUnfeasible", ORE, TheLoop); 4866 return ElementCount::getScalable(0); 4867 } 4868 4869 // Disable scalable vectorization if the loop contains any instructions 4870 // with element types not supported for scalable vectors. 
4871 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4872 return !Ty->isVoidTy() && 4873 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4874 })) { 4875 reportVectorizationInfo("Scalable vectorization is not supported " 4876 "for all element types found in this loop.", 4877 "ScalableVFUnfeasible", ORE, TheLoop); 4878 return ElementCount::getScalable(0); 4879 } 4880 4881 if (Legal->isSafeForAnyVectorWidth()) 4882 return MaxScalableVF; 4883 4884 // Limit MaxScalableVF by the maximum safe dependence distance. 4885 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4886 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4887 MaxVScale = 4888 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4889 MaxScalableVF = ElementCount::getScalable( 4890 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 4891 if (!MaxScalableVF) 4892 reportVectorizationInfo( 4893 "Max legal vector width too small, scalable vectorization " 4894 "unfeasible.", 4895 "ScalableVFUnfeasible", ORE, TheLoop); 4896 4897 return MaxScalableVF; 4898 } 4899 4900 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4901 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4902 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4903 unsigned SmallestType, WidestType; 4904 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4905 4906 // Get the maximum safe dependence distance in bits computed by LAA. 4907 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4908 // the memory accesses that is most restrictive (involved in the smallest 4909 // dependence distance). 4910 unsigned MaxSafeElements = 4911 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4912 4913 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4914 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4915 4916 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4917 << ".\n"); 4918 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4919 << ".\n"); 4920 4921 // First analyze the UserVF, fall back if the UserVF should be ignored. 4922 if (UserVF) { 4923 auto MaxSafeUserVF = 4924 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4925 4926 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4927 // If `VF=vscale x N` is safe, then so is `VF=N` 4928 if (UserVF.isScalable()) 4929 return FixedScalableVFPair( 4930 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4931 else 4932 return UserVF; 4933 } 4934 4935 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4936 4937 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4938 // is better to ignore the hint and let the compiler choose a suitable VF. 
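// For example (hypothetical values, added for clarity): with
// '#pragma clang loop vectorize_width(16)' but a maximum safe dependence
// distance of only 8 elements, the fixed UserVF=16 is clamped to VF=8 below,
// whereas an unsafe scalable hint such as 'vectorize_width(16, scalable)' is
// simply dropped and the compiler picks its own VF.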
4939 if (!UserVF.isScalable()) {
4940 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4941 << " is unsafe, clamping to max safe VF="
4942 << MaxSafeFixedVF << ".\n");
4943 ORE->emit([&]() {
4944 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4945 TheLoop->getStartLoc(),
4946 TheLoop->getHeader())
4947 << "User-specified vectorization factor "
4948 << ore::NV("UserVectorizationFactor", UserVF)
4949 << " is unsafe, clamping to maximum safe vectorization factor "
4950 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
4951 });
4952 return MaxSafeFixedVF;
4953 }
4954
4955 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
4956 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4957 << " is ignored because scalable vectors are not "
4958 "available.\n");
4959 ORE->emit([&]() {
4960 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4961 TheLoop->getStartLoc(),
4962 TheLoop->getHeader())
4963 << "User-specified vectorization factor "
4964 << ore::NV("UserVectorizationFactor", UserVF)
4965 << " is ignored because the target does not support scalable "
4966 "vectors. The compiler will pick a more suitable value.";
4967 });
4968 } else {
4969 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4970 << " is unsafe. Ignoring scalable UserVF.\n");
4971 ORE->emit([&]() {
4972 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4973 TheLoop->getStartLoc(),
4974 TheLoop->getHeader())
4975 << "User-specified vectorization factor "
4976 << ore::NV("UserVectorizationFactor", UserVF)
4977 << " is unsafe. Ignoring the hint to let the compiler pick a "
4978 "more suitable value.";
4979 });
4980 }
4981 }
4982
4983 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
4984 << " / " << WidestType << " bits.\n");
4985
4986 FixedScalableVFPair Result(ElementCount::getFixed(1),
4987 ElementCount::getScalable(0));
4988 if (auto MaxVF =
4989 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
4990 MaxSafeFixedVF, FoldTailByMasking))
4991 Result.FixedVF = MaxVF;
4992
4993 if (auto MaxVF =
4994 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
4995 MaxSafeScalableVF, FoldTailByMasking))
4996 if (MaxVF.isScalable()) {
4997 Result.ScalableVF = MaxVF;
4998 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
4999 << "\n");
5000 }
5001
5002 return Result;
5003 }
5004
5005 FixedScalableVFPair
5006 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5007 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5008 // TODO: It may be useful to do this, since it's still likely to be
5009 // dynamically uniform if the target can skip it.
5010 reportVectorizationFailure(
5011 "Not inserting runtime ptr check for divergent target",
5012 "runtime pointer checks needed. Not enabled for divergent target",
5013 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5014 return FixedScalableVFPair::getNone();
5015 }
5016
5017 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5018 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5019 if (TC == 1) {
5020 reportVectorizationFailure("Single iteration (non) loop",
5021 "loop trip count is one, irrelevant for vectorization",
5022 "SingleIterationLoop", ORE, TheLoop);
5023 return FixedScalableVFPair::getNone();
5024 }
5025
5026 switch (ScalarEpilogueStatus) {
5027 case CM_ScalarEpilogueAllowed:
5028 return computeFeasibleMaxVF(TC, UserVF, false);
5029 case CM_ScalarEpilogueNotAllowedUsePredicate:
5030 LLVM_FALLTHROUGH;
5031 case CM_ScalarEpilogueNotNeededUsePredicate:
5032 LLVM_DEBUG(
5033 dbgs() << "LV: vector predicate hint/switch found.\n"
5034 << "LV: Not allowing scalar epilogue, creating predicated "
5035 << "vector loop.\n");
5036 break;
5037 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5038 // fallthrough as a special case of OptForSize
5039 case CM_ScalarEpilogueNotAllowedOptSize:
5040 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5041 LLVM_DEBUG(
5042 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5043 else
5044 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5045 << "count.\n");
5046
5047 // Bail if runtime checks are required, which are not good when optimising
5048 // for size.
5049 if (runtimeChecksRequired())
5050 return FixedScalableVFPair::getNone();
5051
5052 break;
5053 }
5054
5055 // The only loops we can vectorize without a scalar epilogue are loops with
5056 // a bottom-test and a single exiting block. We'd have to handle the fact
5057 // that not every instruction executes on the last iteration. This will
5058 // require a lane mask which varies through the vector loop body. (TODO)
5059 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5060 // If there was a tail-folding hint/switch, but we can't fold the tail by
5061 // masking, fall back to vectorization with a scalar epilogue.
5062 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5063 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5064 "scalar epilogue instead.\n");
5065 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5066 return computeFeasibleMaxVF(TC, UserVF, false);
5067 }
5068 return FixedScalableVFPair::getNone();
5069 }
5070
5071 // Now try to fold the tail by masking.
5072
5073 // Invalidate interleave groups that require an epilogue if we can't mask
5074 // the interleave-group.
5075 if (!useMaskedInterleavedAccesses(TTI)) {
5076 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5077 "No decisions should have been taken at this point");
5078 // Note: There is no need to invalidate any cost modeling decisions here, as
5079 // none were taken so far.
5080 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5081 }
5082
5083 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5084 // Avoid tail folding if the trip count is known to be a multiple of any VF
5085 // we chose.
5086 // FIXME: The condition below pessimises the case for fixed-width vectors,
5087 // when scalable VFs are also candidates for vectorization.
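// For example (illustrative numbers, assuming no scalable VF candidate): with
// a known trip count of 128, no user-specified interleave count and
// MaxFixedVF=8, 128 % 8 == 0, so no tail remains and tail folding is skipped
// below; with a trip count of 100 the remainder is non-zero and we fall
// through to the tail-folding decision.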
5088 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5089 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5090 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5091 "MaxFixedVF must be a power of 2"); 5092 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5093 : MaxFixedVF.getFixedValue(); 5094 ScalarEvolution *SE = PSE.getSE(); 5095 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5096 const SCEV *ExitCount = SE->getAddExpr( 5097 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5098 const SCEV *Rem = SE->getURemExpr( 5099 SE->applyLoopGuards(ExitCount, TheLoop), 5100 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5101 if (Rem->isZero()) { 5102 // Accept MaxFixedVF if we do not have a tail. 5103 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5104 return MaxFactors; 5105 } 5106 } 5107 5108 // For scalable vectors don't use tail folding for low trip counts or 5109 // optimizing for code size. We only permit this if the user has explicitly 5110 // requested it. 5111 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5112 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5113 MaxFactors.ScalableVF.isVector()) 5114 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5115 5116 // If we don't know the precise trip count, or if the trip count that we 5117 // found modulo the vectorization factor is not zero, try to fold the tail 5118 // by masking. 5119 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5120 if (Legal->prepareToFoldTailByMasking()) { 5121 FoldTailByMasking = true; 5122 return MaxFactors; 5123 } 5124 5125 // If there was a tail-folding hint/switch, but we can't fold the tail by 5126 // masking, fallback to a vectorization with a scalar epilogue. 5127 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5128 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5129 "scalar epilogue instead.\n"); 5130 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5131 return MaxFactors; 5132 } 5133 5134 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5135 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5136 return FixedScalableVFPair::getNone(); 5137 } 5138 5139 if (TC == 0) { 5140 reportVectorizationFailure( 5141 "Unable to calculate the loop count due to complex control flow", 5142 "unable to calculate the loop count due to complex control flow", 5143 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5144 return FixedScalableVFPair::getNone(); 5145 } 5146 5147 reportVectorizationFailure( 5148 "Cannot optimize for size and vectorize at the same time.", 5149 "cannot optimize for size and vectorize at the same time. " 5150 "Enable vectorization of this loop with '#pragma clang loop " 5151 "vectorize(enable)' when compiling with -Os/-Oz", 5152 "NoTailLoopWithOptForSize", ORE, TheLoop); 5153 return FixedScalableVFPair::getNone(); 5154 } 5155 5156 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5157 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5158 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5159 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5160 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5161 ComputeScalableMaxVF ? 
TargetTransformInfo::RGK_ScalableVector 5162 : TargetTransformInfo::RGK_FixedWidthVector); 5163 5164 // Convenience function to return the minimum of two ElementCounts. 5165 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5166 assert((LHS.isScalable() == RHS.isScalable()) && 5167 "Scalable flags must match"); 5168 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5169 }; 5170 5171 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5172 // Note that both WidestRegister and WidestType may not be a powers of 2. 5173 auto MaxVectorElementCount = ElementCount::get( 5174 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5175 ComputeScalableMaxVF); 5176 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5177 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5178 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5179 5180 if (!MaxVectorElementCount) { 5181 LLVM_DEBUG(dbgs() << "LV: The target has no " 5182 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5183 << " vector registers.\n"); 5184 return ElementCount::getFixed(1); 5185 } 5186 5187 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5188 if (ConstTripCount && 5189 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5190 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5191 // If loop trip count (TC) is known at compile time there is no point in 5192 // choosing VF greater than TC (as done in the loop below). Select maximum 5193 // power of two which doesn't exceed TC. 5194 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5195 // when the TC is less than or equal to the known number of lanes. 5196 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5197 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5198 "exceeding the constant trip count: " 5199 << ClampedConstTripCount << "\n"); 5200 return ElementCount::getFixed(ClampedConstTripCount); 5201 } 5202 5203 TargetTransformInfo::RegisterKind RegKind = 5204 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5205 : TargetTransformInfo::RGK_FixedWidthVector; 5206 ElementCount MaxVF = MaxVectorElementCount; 5207 if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 && 5208 TTI.shouldMaximizeVectorBandwidth(RegKind))) { 5209 auto MaxVectorElementCountMaxBW = ElementCount::get( 5210 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5211 ComputeScalableMaxVF); 5212 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5213 5214 // Collect all viable vectorization factors larger than the default MaxVF 5215 // (i.e. MaxVectorElementCount). 5216 SmallVector<ElementCount, 8> VFs; 5217 for (ElementCount VS = MaxVectorElementCount * 2; 5218 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5219 VFs.push_back(VS); 5220 5221 // For each VF calculate its register usage. 5222 auto RUs = calculateRegisterUsage(VFs); 5223 5224 // Select the largest VF which doesn't require more registers than existing 5225 // ones. 
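// Illustrative example (made-up numbers, added for clarity): with a default
// MaxVF of 4, the candidate VFs above are {8, 16}. On a target with 32 vector
// registers, if VF=16 has a maximal local usage of 40 vector registers while
// VF=8 needs only 20, the loop below rejects VF=16 and settles on MaxVF=8.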
5226 for (int i = RUs.size() - 1; i >= 0; --i) { 5227 bool Selected = true; 5228 for (auto &pair : RUs[i].MaxLocalUsers) { 5229 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5230 if (pair.second > TargetNumRegisters) 5231 Selected = false; 5232 } 5233 if (Selected) { 5234 MaxVF = VFs[i]; 5235 break; 5236 } 5237 } 5238 if (ElementCount MinVF = 5239 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5240 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5241 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5242 << ") with target's minimum: " << MinVF << '\n'); 5243 MaxVF = MinVF; 5244 } 5245 } 5246 5247 // Invalidate any widening decisions we might have made, in case the loop 5248 // requires prediction (decided later), but we have already made some 5249 // load/store widening decisions. 5250 invalidateCostModelingDecisions(); 5251 } 5252 return MaxVF; 5253 } 5254 5255 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5256 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5257 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5258 auto Min = Attr.getVScaleRangeMin(); 5259 auto Max = Attr.getVScaleRangeMax(); 5260 if (Max && Min == Max) 5261 return Max; 5262 } 5263 5264 return TTI.getVScaleForTuning(); 5265 } 5266 5267 bool LoopVectorizationCostModel::isMoreProfitable( 5268 const VectorizationFactor &A, const VectorizationFactor &B) const { 5269 InstructionCost CostA = A.Cost; 5270 InstructionCost CostB = B.Cost; 5271 5272 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5273 5274 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5275 MaxTripCount) { 5276 // If we are folding the tail and the trip count is a known (possibly small) 5277 // constant, the trip count will be rounded up to an integer number of 5278 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5279 // which we compare directly. When not folding the tail, the total cost will 5280 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5281 // approximated with the per-lane cost below instead of using the tripcount 5282 // as here. 5283 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5284 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5285 return RTCostA < RTCostB; 5286 } 5287 5288 // Improve estimate for the vector width if it is scalable. 5289 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5290 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5291 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5292 if (A.Width.isScalable()) 5293 EstimatedWidthA *= VScale.getValue(); 5294 if (B.Width.isScalable()) 5295 EstimatedWidthB *= VScale.getValue(); 5296 } 5297 5298 // Assume vscale may be larger than 1 (or the value being tuned for), 5299 // so that scalable vectorization is slightly favorable over fixed-width 5300 // vectorization. 
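// For example (illustrative costs, assuming an estimated vscale of 2):
// comparing A = {vscale x 4, cost 8} with B = {fixed 8, cost 8} gives
// 8 * 8 <= 8 * (4 * 2), so the scalable factor wins the tie and is preferred
// over the equally cheap fixed-width factor.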
5301 if (A.Width.isScalable() && !B.Width.isScalable()) 5302 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5303 5304 // To avoid the need for FP division: 5305 // (CostA / A.Width) < (CostB / B.Width) 5306 // <=> (CostA * B.Width) < (CostB * A.Width) 5307 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5308 } 5309 5310 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5311 const ElementCountSet &VFCandidates) { 5312 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5313 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5314 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5315 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5316 "Expected Scalar VF to be a candidate"); 5317 5318 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5319 VectorizationFactor ChosenFactor = ScalarCost; 5320 5321 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5322 if (ForceVectorization && VFCandidates.size() > 1) { 5323 // Ignore scalar width, because the user explicitly wants vectorization. 5324 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5325 // evaluation. 5326 ChosenFactor.Cost = InstructionCost::getMax(); 5327 } 5328 5329 SmallVector<InstructionVFPair> InvalidCosts; 5330 for (const auto &i : VFCandidates) { 5331 // The cost for scalar VF=1 is already calculated, so ignore it. 5332 if (i.isScalar()) 5333 continue; 5334 5335 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5336 VectorizationFactor Candidate(i, C.first); 5337 5338 #ifndef NDEBUG 5339 unsigned AssumedMinimumVscale = 1; 5340 if (Optional<unsigned> VScale = getVScaleForTuning()) 5341 AssumedMinimumVscale = VScale.getValue(); 5342 unsigned Width = 5343 Candidate.Width.isScalable() 5344 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5345 : Candidate.Width.getFixedValue(); 5346 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5347 << " costs: " << (Candidate.Cost / Width)); 5348 if (i.isScalable()) 5349 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5350 << AssumedMinimumVscale << ")"); 5351 LLVM_DEBUG(dbgs() << ".\n"); 5352 #endif 5353 5354 if (!C.second && !ForceVectorization) { 5355 LLVM_DEBUG( 5356 dbgs() << "LV: Not considering vector loop of width " << i 5357 << " because it will not generate any vector instructions.\n"); 5358 continue; 5359 } 5360 5361 // If profitable add it to ProfitableVF list. 5362 if (isMoreProfitable(Candidate, ScalarCost)) 5363 ProfitableVFs.push_back(Candidate); 5364 5365 if (isMoreProfitable(Candidate, ChosenFactor)) 5366 ChosenFactor = Candidate; 5367 } 5368 5369 // Emit a report of VFs with invalid costs in the loop. 5370 if (!InvalidCosts.empty()) { 5371 // Group the remarks per instruction, keeping the instruction order from 5372 // InvalidCosts. 5373 std::map<Instruction *, unsigned> Numbering; 5374 unsigned I = 0; 5375 for (auto &Pair : InvalidCosts) 5376 if (!Numbering.count(Pair.first)) 5377 Numbering[Pair.first] = I++; 5378 5379 // Sort the list, first on instruction(number) then on VF. 
5380 llvm::sort(InvalidCosts,
5381 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5382 if (Numbering[A.first] != Numbering[B.first])
5383 return Numbering[A.first] < Numbering[B.first];
5384 ElementCountComparator ECC;
5385 return ECC(A.second, B.second);
5386 });
5387
5388 // For a list of ordered instruction-vf pairs:
5389 // [(load, vf1), (load, vf2), (store, vf1)]
5390 // Group the instructions together to emit separate remarks for:
5391 // load (vf1, vf2)
5392 // store (vf1)
5393 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5394 auto Subset = ArrayRef<InstructionVFPair>();
5395 do {
5396 if (Subset.empty())
5397 Subset = Tail.take_front(1);
5398
5399 Instruction *I = Subset.front().first;
5400
5401 // If the next instruction is different, or if there are no other pairs,
5402 // emit a remark for the collated subset, e.g. for
5403 // [(load, vf1), (load, vf2)]
5404 // emit:
5405 // remark: invalid costs for 'load' at VF=(vf1, vf2)
5406 if (Subset == Tail || Tail[Subset.size()].first != I) {
5407 std::string OutString;
5408 raw_string_ostream OS(OutString);
5409 assert(!Subset.empty() && "Unexpected empty range");
5410 OS << "Instruction with invalid costs prevented vectorization at VF=(";
5411 for (auto &Pair : Subset)
5412 OS << (Pair.second == Subset.front().second ? "" : ", ")
5413 << Pair.second;
5414 OS << "):";
5415 if (auto *CI = dyn_cast<CallInst>(I))
5416 OS << " call to " << CI->getCalledFunction()->getName();
5417 else
5418 OS << " " << I->getOpcodeName();
5419 OS.flush();
5420 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5421 Tail = Tail.drop_front(Subset.size());
5422 Subset = {};
5423 } else
5424 // Grow the subset by one element.
5425 Subset = Tail.take_front(Subset.size() + 1);
5426 } while (!Tail.empty());
5427 }
5428
5429 if (!EnableCondStoresVectorization && NumPredStores) {
5430 reportVectorizationFailure("There are conditional stores.",
5431 "store that is conditionally executed prevents vectorization",
5432 "ConditionalStore", ORE, TheLoop);
5433 ChosenFactor = ScalarCost;
5434 }
5435
5436 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5437 ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5438 << "LV: Vectorization seems to be not beneficial, "
5439 << "but was forced by a user.\n");
5440 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5441 return ChosenFactor;
5442 }
5443
5444 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5445 const Loop &L, ElementCount VF) const {
5446 // Cross-iteration phis such as reductions need special handling and are
5447 // currently unsupported.
5448 if (any_of(L.getHeader()->phis(),
5449 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5450 return false;
5451
5452 // Phis with uses outside of the loop require special handling and are
5453 // currently unsupported.
5454 for (auto &Entry : Legal->getInductionVars()) {
5455 // Look for uses of the value of the induction at the last iteration.
5456 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5457 for (User *U : PostInc->users())
5458 if (!L.contains(cast<Instruction>(U)))
5459 return false;
5460 // Look for uses of the penultimate value of the induction.
5461 for (User *U : Entry.first->users())
5462 if (!L.contains(cast<Instruction>(U)))
5463 return false;
5464 }
5465
5466 // Induction variables that are widened require special handling that is
5467 // currently not supported.
5468 if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5469 return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5470 this->isProfitableToScalarize(Entry.first, VF));
5471 }))
5472 return false;
5473
5474 // Epilogue vectorization code has not been audited to ensure it handles
5475 // non-latch exits properly. It may be fine, but it needs to be audited and
5476 // tested.
5477 if (L.getExitingBlock() != L.getLoopLatch())
5478 return false;
5479
5480 return true;
5481 }
5482
5483 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5484 const ElementCount VF) const {
5485 // FIXME: We need a much better cost-model to take different parameters such
5486 // as register pressure, code size increase and cost of extra branches into
5487 // account. For now we apply a very crude heuristic and only consider loops
5488 // with vectorization factors larger than a certain value.
5489 // We also consider epilogue vectorization unprofitable for targets that don't
5490 // consider interleaving beneficial (e.g. MVE).
5491 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5492 return false;
5493 // FIXME: We should consider changing the threshold for scalable
5494 // vectors to take VScaleForTuning into account.
5495 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5496 return true;
5497 return false;
5498 }
5499
5500 VectorizationFactor
5501 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5502 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5503 VectorizationFactor Result = VectorizationFactor::Disabled();
5504 if (!EnableEpilogueVectorization) {
5505 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5506 return Result;
5507 }
5508
5509 if (!isScalarEpilogueAllowed()) {
5510 LLVM_DEBUG(
5511 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5512 "allowed.\n";);
5513 return Result;
5514 }
5515
5516 // Not really a cost consideration, but check for unsupported cases here to
5517 // simplify the logic.
5518 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5519 LLVM_DEBUG(
5520 dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5521 "not a supported candidate.\n";);
5522 return Result;
5523 }
5524
5525 if (EpilogueVectorizationForceVF > 1) {
5526 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5527 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5528 if (LVP.hasPlanWithVF(ForcedEC))
5529 return {ForcedEC, 0};
5530 else {
5531 LLVM_DEBUG(
5532 dbgs()
5533 << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5534 return Result;
5535 }
5536 }
5537
5538 if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5539 TheLoop->getHeader()->getParent()->hasMinSize()) {
5540 LLVM_DEBUG(
5541 dbgs()
5542 << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5543 return Result;
5544 }
5545
5546 if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5547 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5548 "this loop\n");
5549 return Result;
5550 }
5551
5552 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5553 // the main loop handles 8 lanes per iteration. We could still benefit from
5554 // vectorizing the epilogue loop with VF=4.
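// Continuing that example as an illustrative reading of the code below (not
// an additional constraint): EstimatedRuntimeVF becomes the fixed count
// 8 (2 * the tuning vscale), so any already-profitable fixed-width candidate
// narrower than 8 lanes remains a candidate for the epilogue, subject to the
// other checks in the loop that follows.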
5555 ElementCount EstimatedRuntimeVF = MainLoopVF; 5556 if (MainLoopVF.isScalable()) { 5557 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5558 if (Optional<unsigned> VScale = getVScaleForTuning()) 5559 EstimatedRuntimeVF *= VScale.getValue(); 5560 } 5561 5562 for (auto &NextVF : ProfitableVFs) 5563 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5564 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5565 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5566 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5567 LVP.hasPlanWithVF(NextVF.Width)) 5568 Result = NextVF; 5569 5570 if (Result != VectorizationFactor::Disabled()) 5571 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5572 << Result.Width << "\n";); 5573 return Result; 5574 } 5575 5576 std::pair<unsigned, unsigned> 5577 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5578 unsigned MinWidth = -1U; 5579 unsigned MaxWidth = 8; 5580 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5581 // For in-loop reductions, no element types are added to ElementTypesInLoop 5582 // if there are no loads/stores in the loop. In this case, check through the 5583 // reduction variables to determine the maximum width. 5584 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5585 // Reset MaxWidth so that we can find the smallest type used by recurrences 5586 // in the loop. 5587 MaxWidth = -1U; 5588 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5589 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5590 // When finding the min width used by the recurrence we need to account 5591 // for casts on the input operands of the recurrence. 5592 MaxWidth = std::min<unsigned>( 5593 MaxWidth, std::min<unsigned>( 5594 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5595 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5596 } 5597 } else { 5598 for (Type *T : ElementTypesInLoop) { 5599 MinWidth = std::min<unsigned>( 5600 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5601 MaxWidth = std::max<unsigned>( 5602 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5603 } 5604 } 5605 return {MinWidth, MaxWidth}; 5606 } 5607 5608 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5609 ElementTypesInLoop.clear(); 5610 // For each block. 5611 for (BasicBlock *BB : TheLoop->blocks()) { 5612 // For each instruction in the loop. 5613 for (Instruction &I : BB->instructionsWithoutDebug()) { 5614 Type *T = I.getType(); 5615 5616 // Skip ignored values. 5617 if (ValuesToIgnore.count(&I)) 5618 continue; 5619 5620 // Only examine Loads, Stores and PHINodes. 5621 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5622 continue; 5623 5624 // Examine PHI nodes that are reduction variables. Update the type to 5625 // account for the recurrence type. 5626 if (auto *PN = dyn_cast<PHINode>(&I)) { 5627 if (!Legal->isReductionVariable(PN)) 5628 continue; 5629 const RecurrenceDescriptor &RdxDesc = 5630 Legal->getReductionVars().find(PN)->second; 5631 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5632 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5633 RdxDesc.getRecurrenceType(), 5634 TargetTransformInfo::ReductionFlags())) 5635 continue; 5636 T = RdxDesc.getRecurrenceType(); 5637 } 5638 5639 // Examine the stored values. 
5640 if (auto *ST = dyn_cast<StoreInst>(&I)) 5641 T = ST->getValueOperand()->getType(); 5642 5643 assert(T->isSized() && 5644 "Expected the load/store/recurrence type to be sized"); 5645 5646 ElementTypesInLoop.insert(T); 5647 } 5648 } 5649 } 5650 5651 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5652 unsigned LoopCost) { 5653 // -- The interleave heuristics -- 5654 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5655 // There are many micro-architectural considerations that we can't predict 5656 // at this level. For example, frontend pressure (on decode or fetch) due to 5657 // code size, or the number and capabilities of the execution ports. 5658 // 5659 // We use the following heuristics to select the interleave count: 5660 // 1. If the code has reductions, then we interleave to break the cross 5661 // iteration dependency. 5662 // 2. If the loop is really small, then we interleave to reduce the loop 5663 // overhead. 5664 // 3. We don't interleave if we think that we will spill registers to memory 5665 // due to the increased register pressure. 5666 5667 if (!isScalarEpilogueAllowed()) 5668 return 1; 5669 5670 // We used the distance for the interleave count. 5671 if (Legal->getMaxSafeDepDistBytes() != -1U) 5672 return 1; 5673 5674 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5675 const bool HasReductions = !Legal->getReductionVars().empty(); 5676 // Do not interleave loops with a relatively small known or estimated trip 5677 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 5678 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5679 // because with the above conditions interleaving can expose ILP and break 5680 // cross iteration dependences for reductions. 5681 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5682 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5683 return 1; 5684 5685 // If we did not calculate the cost for VF (because the user selected the VF) 5686 // then we calculate the cost of VF here. 5687 if (LoopCost == 0) { 5688 InstructionCost C = expectedCost(VF).first; 5689 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5690 LoopCost = *C.getValue(); 5691 5692 // Loop body is free and there is no need for interleaving. 5693 if (LoopCost == 0) 5694 return 1; 5695 } 5696 5697 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5698 // We divide by these constants so assume that we have at least one 5699 // instruction that uses at least one register. 5700 for (auto& pair : R.MaxLocalUsers) { 5701 pair.second = std::max(pair.second, 1U); 5702 } 5703 5704 // We calculate the interleave count using the following formula. 5705 // Subtract the number of loop invariants from the number of available 5706 // registers. These registers are used by all of the interleaved instances. 5707 // Next, divide the remaining registers by the number of registers that is 5708 // required by the loop, in order to estimate how many parallel instances 5709 // fit without causing spills. All of this is rounded down if necessary to be 5710 // a power of two. We want power of two interleave count to simplify any 5711 // addressing operations or alignment considerations. 
5712 // We also want power of two interleave counts to ensure that the induction
5713 // variable of the vector loop wraps to zero, when tail is folded by masking;
5714 // this currently happens when OptForSize, in which case IC is set to 1 above.
5715 unsigned IC = UINT_MAX;
5716
5717 for (auto& pair : R.MaxLocalUsers) {
5718 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5719 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5720 << " registers of "
5721 << TTI.getRegisterClassName(pair.first) << " register class\n");
5722 if (VF.isScalar()) {
5723 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5724 TargetNumRegisters = ForceTargetNumScalarRegs;
5725 } else {
5726 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5727 TargetNumRegisters = ForceTargetNumVectorRegs;
5728 }
5729 unsigned MaxLocalUsers = pair.second;
5730 unsigned LoopInvariantRegs = 0;
5731 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5732 LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5733
5734 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5735 // Don't count the induction variable as interleaved.
5736 if (EnableIndVarRegisterHeur) {
5737 TmpIC =
5738 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5739 std::max(1U, (MaxLocalUsers - 1)));
5740 }
5741
5742 IC = std::min(IC, TmpIC);
5743 }
5744
5745 // Clamp the interleave ranges to reasonable counts.
5746 unsigned MaxInterleaveCount =
5747 TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5748
5749 // Check if the user has overridden the max.
5750 if (VF.isScalar()) {
5751 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5752 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5753 } else {
5754 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5755 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5756 }
5757
5758 // If the trip count is a known or estimated compile-time constant, limit the
5759 // interleave count to be less than the trip count divided by VF, provided it
5760 // is at least 1.
5761 //
5762 // For scalable vectors we can't know if interleaving is beneficial. It may
5763 // not be beneficial for small loops if none of the lanes in the second vector
5764 // iteration is enabled. However, for larger loops, there is likely to be a
5765 // similar benefit as for fixed-width vectors. For now, we choose to leave
5766 // the InterleaveCount as if vscale is '1', although if some information about
5767 // the vector is known (e.g. min vector size), we can make a better decision.
5768 if (BestKnownTC) {
5769 MaxInterleaveCount =
5770 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5771 // Make sure MaxInterleaveCount is greater than 0.
5772 MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5773 }
5774
5775 assert(MaxInterleaveCount > 0 &&
5776 "Maximum interleave count must be greater than 0");
5777
5778 // Clamp the calculated IC to be between 1 and the max interleave count
5779 // that the target and trip count allow.
5780 if (IC > MaxInterleaveCount)
5781 IC = MaxInterleaveCount;
5782 else
5783 // Make sure IC is greater than 0.
5784 IC = std::max(1u, IC);
5785
5786 assert(IC > 0 && "Interleave count must be greater than 0.");
5787
5788 // Interleave if we vectorized this loop and there is a reduction that could
5789 // benefit from interleaving.
5790 if (VF.isVector() && HasReductions) { 5791 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5792 return IC; 5793 } 5794 5795 // For any scalar loop that either requires runtime checks or predication we 5796 // are better off leaving this to the unroller. Note that if we've already 5797 // vectorized the loop we will have done the runtime check and so interleaving 5798 // won't require further checks. 5799 bool ScalarInterleavingRequiresPredication = 5800 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5801 return Legal->blockNeedsPredication(BB); 5802 })); 5803 bool ScalarInterleavingRequiresRuntimePointerCheck = 5804 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5805 5806 // We want to interleave small loops in order to reduce the loop overhead and 5807 // potentially expose ILP opportunities. 5808 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5809 << "LV: IC is " << IC << '\n' 5810 << "LV: VF is " << VF << '\n'); 5811 const bool AggressivelyInterleaveReductions = 5812 TTI.enableAggressiveInterleaving(HasReductions); 5813 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5814 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5815 // We assume that the cost overhead is 1 and we use the cost model 5816 // to estimate the cost of the loop and interleave until the cost of the 5817 // loop overhead is about 5% of the cost of the loop. 5818 unsigned SmallIC = 5819 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5820 5821 // Interleave until store/load ports (estimated by max interleave count) are 5822 // saturated. 5823 unsigned NumStores = Legal->getNumStores(); 5824 unsigned NumLoads = Legal->getNumLoads(); 5825 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5826 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5827 5828 // There is little point in interleaving for reductions containing selects 5829 // and compares when VF=1 since it may just create more overhead than it's 5830 // worth for loops with small trip counts. This is because we still have to 5831 // do the final reduction after the loop. 5832 bool HasSelectCmpReductions = 5833 HasReductions && 5834 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5835 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5836 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5837 RdxDesc.getRecurrenceKind()); 5838 }); 5839 if (HasSelectCmpReductions) { 5840 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5841 return 1; 5842 } 5843 5844 // If we have a scalar reduction (vector reductions are already dealt with 5845 // by this point), we can increase the critical path length if the loop 5846 // we're interleaving is inside another loop. For tree-wise reductions 5847 // set the limit to 2, and for ordered reductions it's best to disable 5848 // interleaving entirely. 
5849 if (HasReductions && TheLoop->getLoopDepth() > 1) {
5850 bool HasOrderedReductions =
5851 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5852 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5853 return RdxDesc.isOrdered();
5854 });
5855 if (HasOrderedReductions) {
5856 LLVM_DEBUG(
5857 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5858 return 1;
5859 }
5860
5861 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5862 SmallIC = std::min(SmallIC, F);
5863 StoresIC = std::min(StoresIC, F);
5864 LoadsIC = std::min(LoadsIC, F);
5865 }
5866
5867 if (EnableLoadStoreRuntimeInterleave &&
5868 std::max(StoresIC, LoadsIC) > SmallIC) {
5869 LLVM_DEBUG(
5870 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5871 return std::max(StoresIC, LoadsIC);
5872 }
5873
5874 // If there are scalar reductions and TTI has enabled aggressive
5875 // interleaving for reductions, we will interleave to expose ILP.
5876 if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5877 AggressivelyInterleaveReductions) {
5878 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5879 // Interleave no less than SmallIC but not as aggressively as the normal IC
5880 // to satisfy the rare situation when resources are too limited.
5881 return std::max(IC / 2, SmallIC);
5882 } else {
5883 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5884 return SmallIC;
5885 }
5886 }
5887
5888 // Interleave if this is a large loop (small loops are already dealt with by
5889 // this point) that could benefit from interleaving.
5890 if (AggressivelyInterleaveReductions) {
5891 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5892 return IC;
5893 }
5894
5895 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5896 return 1;
5897 }
5898
5899 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5900 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5901 // This function calculates the register usage by measuring the highest number
5902 // of values that are alive at a single location. Obviously, this is a very
5903 // rough estimation. We scan the loop in topological order and
5904 // assign a number to each instruction. We use RPO to ensure that defs are
5905 // met before their users. We assume that each instruction that has in-loop
5906 // users starts an interval. We record every time that an in-loop value is
5907 // used, so we have a list of the first and last occurrences of each
5908 // instruction. Next, we transpose this data structure into a multi-map that
5909 // holds the list of intervals that *end* at a specific location. This
5910 // multi-map allows us to perform a linear search. We scan the instructions linearly
5911 // and record each time that a new interval starts, by placing it in a set.
5912 // If we find this value in the multi-map then we remove it from the set.
5913 // The max register usage is the maximum size of the set.
5914 // We also search for instructions that are defined outside the loop, but are
5915 // used inside the loop. We need this number separately from the max-interval
5916 // usage number because when we unroll, loop-invariant values do not take
5917 // more registers.
5918 LoopBlocksDFS DFS(TheLoop);
5919 DFS.perform(LI);
5920
5921 RegisterUsage RU;
5922
5923 // Each 'key' in the map opens a new interval. The values
5924 // of the map are the index of the 'last seen' usage of the
5925 // instruction that is the key.
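// As a small illustrative example (not taken from any particular loop),
// consider a body of the form
//   %a = load ...
//   %b = add %a, 1
//   %c = mul %a, %b
//   store %c, ...
// The last use of both %a and %b is the 'mul', so when the scan below reaches
// %c two intervals are still open and the maximum usage recorded for their
// register class is 2. The store itself never starts an interval because its
// result has no users.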
5926 using IntervalMap = DenseMap<Instruction *, unsigned>; 5927 5928 // Maps instruction to its index. 5929 SmallVector<Instruction *, 64> IdxToInstr; 5930 // Marks the end of each interval. 5931 IntervalMap EndPoint; 5932 // Saves the list of instruction indices that are used in the loop. 5933 SmallPtrSet<Instruction *, 8> Ends; 5934 // Saves the list of values that are used in the loop but are 5935 // defined outside the loop, such as arguments and constants. 5936 SmallPtrSet<Value *, 8> LoopInvariants; 5937 5938 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5939 for (Instruction &I : BB->instructionsWithoutDebug()) { 5940 IdxToInstr.push_back(&I); 5941 5942 // Save the end location of each USE. 5943 for (Value *U : I.operands()) { 5944 auto *Instr = dyn_cast<Instruction>(U); 5945 5946 // Ignore non-instruction values such as arguments, constants, etc. 5947 if (!Instr) 5948 continue; 5949 5950 // If this instruction is outside the loop then record it and continue. 5951 if (!TheLoop->contains(Instr)) { 5952 LoopInvariants.insert(Instr); 5953 continue; 5954 } 5955 5956 // Overwrite previous end points. 5957 EndPoint[Instr] = IdxToInstr.size(); 5958 Ends.insert(Instr); 5959 } 5960 } 5961 } 5962 5963 // Saves the list of intervals that end with the index in 'key'. 5964 using InstrList = SmallVector<Instruction *, 2>; 5965 DenseMap<unsigned, InstrList> TransposeEnds; 5966 5967 // Transpose the EndPoints to a list of values that end at each index. 5968 for (auto &Interval : EndPoint) 5969 TransposeEnds[Interval.second].push_back(Interval.first); 5970 5971 SmallPtrSet<Instruction *, 8> OpenIntervals; 5972 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5973 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5974 5975 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5976 5977 // A lambda that gets the register usage for the given type and VF. 5978 const auto &TTICapture = TTI; 5979 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 5980 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5981 return 0; 5982 InstructionCost::CostType RegUsage = 5983 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 5984 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 5985 "Nonsensical values for register usage."); 5986 return RegUsage; 5987 }; 5988 5989 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5990 Instruction *I = IdxToInstr[i]; 5991 5992 // Remove all of the instructions that end at this location. 5993 InstrList &List = TransposeEnds[i]; 5994 for (Instruction *ToRemove : List) 5995 OpenIntervals.erase(ToRemove); 5996 5997 // Ignore instructions that are never used within the loop. 5998 if (!Ends.count(I)) 5999 continue; 6000 6001 // Skip ignored values. 6002 if (ValuesToIgnore.count(I)) 6003 continue; 6004 6005 // For each VF find the maximum usage of registers. 6006 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6007 // Count the number of live intervals. 6008 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6009 6010 if (VFs[j].isScalar()) { 6011 for (auto Inst : OpenIntervals) { 6012 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6013 if (RegUsage.find(ClassID) == RegUsage.end()) 6014 RegUsage[ClassID] = 1; 6015 else 6016 RegUsage[ClassID] += 1; 6017 } 6018 } else { 6019 collectUniformsAndScalars(VFs[j]); 6020 for (auto Inst : OpenIntervals) { 6021 // Skip ignored values for VF > 1. 
6022 if (VecValuesToIgnore.count(Inst)) 6023 continue; 6024 if (isScalarAfterVectorization(Inst, VFs[j])) { 6025 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6026 if (RegUsage.find(ClassID) == RegUsage.end()) 6027 RegUsage[ClassID] = 1; 6028 else 6029 RegUsage[ClassID] += 1; 6030 } else { 6031 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6032 if (RegUsage.find(ClassID) == RegUsage.end()) 6033 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6034 else 6035 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6036 } 6037 } 6038 } 6039 6040 for (auto& pair : RegUsage) { 6041 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6042 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6043 else 6044 MaxUsages[j][pair.first] = pair.second; 6045 } 6046 } 6047 6048 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6049 << OpenIntervals.size() << '\n'); 6050 6051 // Add the current instruction to the list of open intervals. 6052 OpenIntervals.insert(I); 6053 } 6054 6055 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6056 SmallMapVector<unsigned, unsigned, 4> Invariant; 6057 6058 for (auto Inst : LoopInvariants) { 6059 unsigned Usage = 6060 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6061 unsigned ClassID = 6062 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6063 if (Invariant.find(ClassID) == Invariant.end()) 6064 Invariant[ClassID] = Usage; 6065 else 6066 Invariant[ClassID] += Usage; 6067 } 6068 6069 LLVM_DEBUG({ 6070 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6071 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6072 << " item\n"; 6073 for (const auto &pair : MaxUsages[i]) { 6074 dbgs() << "LV(REG): RegisterClass: " 6075 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6076 << " registers\n"; 6077 } 6078 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6079 << " item\n"; 6080 for (const auto &pair : Invariant) { 6081 dbgs() << "LV(REG): RegisterClass: " 6082 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6083 << " registers\n"; 6084 } 6085 }); 6086 6087 RU.LoopInvariantRegs = Invariant; 6088 RU.MaxLocalUsers = MaxUsages[i]; 6089 RUs[i] = RU; 6090 } 6091 6092 return RUs; 6093 } 6094 6095 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6096 ElementCount VF) { 6097 // TODO: Cost model for emulated masked load/store is completely 6098 // broken. This hack guides the cost model to use an artificially 6099 // high enough value to practically disable vectorization with such 6100 // operations, except where previously deployed legality hack allowed 6101 // using very low cost values. This is to avoid regressions coming simply 6102 // from moving "masked load/store" check from legality to cost model. 6103 // Masked Load/Gather emulation was previously never allowed. 6104 // Limited number of Masked Store/Scatter emulation was allowed. 6105 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6106 return isa<LoadInst>(I) || 6107 (isa<StoreInst>(I) && 6108 NumPredStores > NumberOfStoresToPredicate); 6109 } 6110 6111 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6112 // If we aren't vectorizing the loop, or if we've already collected the 6113 // instructions to scalarize, there's nothing to do. 
Collection may already
6114 // have occurred if we have a user-selected VF and are now computing the
6115 // expected cost for interleaving.
6116 if (VF.isScalar() || VF.isZero() ||
6117 InstsToScalarize.find(VF) != InstsToScalarize.end())
6118 return;
6119
6120 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6121 // not profitable to scalarize any instructions, the presence of VF in the
6122 // map will indicate that we've analyzed it already.
6123 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6124
6125 // Find all the instructions that are scalar with predication in the loop and
6126 // determine if it would be better to not if-convert the blocks they are in.
6127 // If so, we also record the instructions to scalarize.
6128 for (BasicBlock *BB : TheLoop->blocks()) {
6129 if (!blockNeedsPredicationForAnyReason(BB))
6130 continue;
6131 for (Instruction &I : *BB)
6132 if (isScalarWithPredication(&I, VF)) {
6133 ScalarCostsTy ScalarCosts;
6134 // Do not apply discount if scalable, because that would lead to
6135 // invalid scalarization costs.
6136 // Do not apply discount logic if hacked cost is needed
6137 // for emulated masked memrefs.
6138 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6139 computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6140 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6141 // Remember that BB will remain after vectorization.
6142 PredicatedBBsAfterVectorization.insert(BB);
6143 }
6144 }
6145 }
6146
6147 int LoopVectorizationCostModel::computePredInstDiscount(
6148 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6149 assert(!isUniformAfterVectorization(PredInst, VF) &&
6150 "Instruction marked uniform-after-vectorization will be predicated");
6151
6152 // Initialize the discount to zero, meaning that the scalar version and the
6153 // vector version cost the same.
6154 InstructionCost Discount = 0;
6155
6156 // Holds instructions to analyze. The instructions we visit are mapped in
6157 // ScalarCosts. Those instructions are the ones that would be scalarized if
6158 // we find that the scalar version costs less.
6159 SmallVector<Instruction *, 8> Worklist;
6160
6161 // Returns true if the given instruction can be scalarized.
6162 auto canBeScalarized = [&](Instruction *I) -> bool {
6163 // We only attempt to scalarize instructions forming a single-use chain
6164 // from the original predicated block that would otherwise be vectorized.
6165 // Although not strictly necessary, we give up on instructions we know will
6166 // already be scalar to avoid traversing chains that are unlikely to be
6167 // beneficial.
6168 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6169 isScalarAfterVectorization(I, VF))
6170 return false;
6171
6172 // If the instruction is scalar with predication, it will be analyzed
6173 // separately. We ignore it within the context of PredInst.
6174 if (isScalarWithPredication(I, VF))
6175 return false;
6176
6177 // If any of the instruction's operands are uniform after vectorization,
6178 // the instruction cannot be scalarized. This prevents, for example, a
6179 // masked load from being scalarized.
6180 //
6181 // We assume we will only emit a value for lane zero of an instruction
6182 // marked uniform after vectorization, rather than VF identical values.
6183 // Thus, if we scalarize an instruction that uses a uniform, we would
6184 // create uses of values corresponding to the lanes we aren't emitting code
6185 // for.
This behavior can be changed by allowing getScalarValue to clone 6186 // the lane zero values for uniforms rather than asserting. 6187 for (Use &U : I->operands()) 6188 if (auto *J = dyn_cast<Instruction>(U.get())) 6189 if (isUniformAfterVectorization(J, VF)) 6190 return false; 6191 6192 // Otherwise, we can scalarize the instruction. 6193 return true; 6194 }; 6195 6196 // Compute the expected cost discount from scalarizing the entire expression 6197 // feeding the predicated instruction. We currently only consider expressions 6198 // that are single-use instruction chains. 6199 Worklist.push_back(PredInst); 6200 while (!Worklist.empty()) { 6201 Instruction *I = Worklist.pop_back_val(); 6202 6203 // If we've already analyzed the instruction, there's nothing to do. 6204 if (ScalarCosts.find(I) != ScalarCosts.end()) 6205 continue; 6206 6207 // Compute the cost of the vector instruction. Note that this cost already 6208 // includes the scalarization overhead of the predicated instruction. 6209 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6210 6211 // Compute the cost of the scalarized instruction. This cost is the cost of 6212 // the instruction as if it wasn't if-converted and instead remained in the 6213 // predicated block. We will scale this cost by block probability after 6214 // computing the scalarization overhead. 6215 InstructionCost ScalarCost = 6216 VF.getFixedValue() * 6217 getInstructionCost(I, ElementCount::getFixed(1)).first; 6218 6219 // Compute the scalarization overhead of needed insertelement instructions 6220 // and phi nodes. 6221 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6222 ScalarCost += TTI.getScalarizationOverhead( 6223 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6224 APInt::getAllOnes(VF.getFixedValue()), true, false); 6225 ScalarCost += 6226 VF.getFixedValue() * 6227 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6228 } 6229 6230 // Compute the scalarization overhead of needed extractelement 6231 // instructions. For each of the instruction's operands, if the operand can 6232 // be scalarized, add it to the worklist; otherwise, account for the 6233 // overhead. 6234 for (Use &U : I->operands()) 6235 if (auto *J = dyn_cast<Instruction>(U.get())) { 6236 assert(VectorType::isValidElementType(J->getType()) && 6237 "Instruction has non-scalar type"); 6238 if (canBeScalarized(J)) 6239 Worklist.push_back(J); 6240 else if (needsExtract(J, VF)) { 6241 ScalarCost += TTI.getScalarizationOverhead( 6242 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6243 APInt::getAllOnes(VF.getFixedValue()), false, true); 6244 } 6245 } 6246 6247 // Scale the total scalar cost by block probability. 6248 ScalarCost /= getReciprocalPredBlockProb(); 6249 6250 // Compute the discount. A non-negative discount means the vector version 6251 // of the instruction costs more, and scalarizing would be beneficial. 6252 Discount += VectorCost - ScalarCost; 6253 ScalarCosts[I] = ScalarCost; 6254 } 6255 6256 return *Discount.getValue(); 6257 } 6258 6259 LoopVectorizationCostModel::VectorizationCostTy 6260 LoopVectorizationCostModel::expectedCost( 6261 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6262 VectorizationCostTy Cost; 6263 6264 // For each block. 6265 for (BasicBlock *BB : TheLoop->blocks()) { 6266 VectorizationCostTy BlockCost; 6267 6268 // For each instruction in the old loop. 6269 for (Instruction &I : BB->instructionsWithoutDebug()) { 6270 // Skip ignored values. 
6271 if (ValuesToIgnore.count(&I) || 6272 (VF.isVector() && VecValuesToIgnore.count(&I))) 6273 continue; 6274 6275 VectorizationCostTy C = getInstructionCost(&I, VF); 6276 6277 // Check if we should override the cost. 6278 if (C.first.isValid() && 6279 ForceTargetInstructionCost.getNumOccurrences() > 0) 6280 C.first = InstructionCost(ForceTargetInstructionCost); 6281 6282 // Keep a list of instructions with invalid costs. 6283 if (Invalid && !C.first.isValid()) 6284 Invalid->emplace_back(&I, VF); 6285 6286 BlockCost.first += C.first; 6287 BlockCost.second |= C.second; 6288 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6289 << " for VF " << VF << " For instruction: " << I 6290 << '\n'); 6291 } 6292 6293 // If we are vectorizing a predicated block, it will have been 6294 // if-converted. This means that the block's instructions (aside from 6295 // stores and instructions that may divide by zero) will now be 6296 // unconditionally executed. For the scalar case, we may not always execute 6297 // the predicated block, if it is an if-else block. Thus, scale the block's 6298 // cost by the probability of executing it. blockNeedsPredication from 6299 // Legal is used so as to not include all blocks in tail folded loops. 6300 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6301 BlockCost.first /= getReciprocalPredBlockProb(); 6302 6303 Cost.first += BlockCost.first; 6304 Cost.second |= BlockCost.second; 6305 } 6306 6307 return Cost; 6308 } 6309 6310 /// Gets Address Access SCEV after verifying that the access pattern 6311 /// is loop invariant except the induction variable dependence. 6312 /// 6313 /// This SCEV can be sent to the Target in order to estimate the address 6314 /// calculation cost. 6315 static const SCEV *getAddressAccessSCEV( 6316 Value *Ptr, 6317 LoopVectorizationLegality *Legal, 6318 PredicatedScalarEvolution &PSE, 6319 const Loop *TheLoop) { 6320 6321 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6322 if (!Gep) 6323 return nullptr; 6324 6325 // We are looking for a gep with all loop invariant indices except for one 6326 // which should be an induction variable. 6327 auto SE = PSE.getSE(); 6328 unsigned NumOperands = Gep->getNumOperands(); 6329 for (unsigned i = 1; i < NumOperands; ++i) { 6330 Value *Opd = Gep->getOperand(i); 6331 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6332 !Legal->isInductionVariable(Opd)) 6333 return nullptr; 6334 } 6335 6336 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6337 return PSE.getSCEV(Ptr); 6338 } 6339 6340 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6341 return Legal->hasStride(I->getOperand(0)) || 6342 Legal->hasStride(I->getOperand(1)); 6343 } 6344 6345 InstructionCost 6346 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6347 ElementCount VF) { 6348 assert(VF.isVector() && 6349 "Scalarization cost of instruction implies vectorization."); 6350 if (VF.isScalable()) 6351 return InstructionCost::getInvalid(); 6352 6353 Type *ValTy = getLoadStoreType(I); 6354 auto SE = PSE.getSE(); 6355 6356 unsigned AS = getLoadStoreAddressSpace(I); 6357 Value *Ptr = getLoadStorePointerOperand(I); 6358 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6359 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6360 // that it is being called from this specific place. 
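// Illustrative arithmetic only (the real numbers come from TTI): with VF=4, a
// per-lane address-computation cost of 1 and a scalar memory-op cost of 1,
// the cost computed below starts at 4 * 1 + 4 * 1 = 8, plus the
// insert/extract scalarization overhead; for a predicated access the total is
// then scaled down by the predicated-block probability and the i1 extract and
// branch costs are added.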
6361 6362 // Figure out whether the access is strided and get the stride value 6363 // if it's known in compile time 6364 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6365 6366 // Get the cost of the scalar memory instruction and address computation. 6367 InstructionCost Cost = 6368 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6369 6370 // Don't pass *I here, since it is scalar but will actually be part of a 6371 // vectorized loop where the user of it is a vectorized instruction. 6372 const Align Alignment = getLoadStoreAlignment(I); 6373 Cost += VF.getKnownMinValue() * 6374 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6375 AS, TTI::TCK_RecipThroughput); 6376 6377 // Get the overhead of the extractelement and insertelement instructions 6378 // we might create due to scalarization. 6379 Cost += getScalarizationOverhead(I, VF); 6380 6381 // If we have a predicated load/store, it will need extra i1 extracts and 6382 // conditional branches, but may not be executed for each vector lane. Scale 6383 // the cost by the probability of executing the predicated block. 6384 if (isPredicatedInst(I, VF)) { 6385 Cost /= getReciprocalPredBlockProb(); 6386 6387 // Add the cost of an i1 extract and a branch 6388 auto *Vec_i1Ty = 6389 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6390 Cost += TTI.getScalarizationOverhead( 6391 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6392 /*Insert=*/false, /*Extract=*/true); 6393 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6394 6395 if (useEmulatedMaskMemRefHack(I, VF)) 6396 // Artificially setting to a high enough value to practically disable 6397 // vectorization with such operations. 6398 Cost = 3000000; 6399 } 6400 6401 return Cost; 6402 } 6403 6404 InstructionCost 6405 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6406 ElementCount VF) { 6407 Type *ValTy = getLoadStoreType(I); 6408 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6409 Value *Ptr = getLoadStorePointerOperand(I); 6410 unsigned AS = getLoadStoreAddressSpace(I); 6411 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6412 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6413 6414 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6415 "Stride should be 1 or -1 for consecutive memory access"); 6416 const Align Alignment = getLoadStoreAlignment(I); 6417 InstructionCost Cost = 0; 6418 if (Legal->isMaskRequired(I)) 6419 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6420 CostKind); 6421 else 6422 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6423 CostKind, I); 6424 6425 bool Reverse = ConsecutiveStride < 0; 6426 if (Reverse) 6427 Cost += 6428 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6429 return Cost; 6430 } 6431 6432 InstructionCost 6433 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6434 ElementCount VF) { 6435 assert(Legal->isUniformMemOp(*I)); 6436 6437 Type *ValTy = getLoadStoreType(I); 6438 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6439 const Align Alignment = getLoadStoreAlignment(I); 6440 unsigned AS = getLoadStoreAddressSpace(I); 6441 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6442 if (isa<LoadInst>(I)) { 6443 return TTI.getAddressComputationCost(ValTy) + 6444 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6445 CostKind) + 6446 
TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6447 } 6448 StoreInst *SI = cast<StoreInst>(I); 6449 6450 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6451 return TTI.getAddressComputationCost(ValTy) + 6452 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6453 CostKind) + 6454 (isLoopInvariantStoreValue 6455 ? 0 6456 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6457 VF.getKnownMinValue() - 1)); 6458 } 6459 6460 InstructionCost 6461 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6462 ElementCount VF) { 6463 Type *ValTy = getLoadStoreType(I); 6464 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6465 const Align Alignment = getLoadStoreAlignment(I); 6466 const Value *Ptr = getLoadStorePointerOperand(I); 6467 6468 return TTI.getAddressComputationCost(VectorTy) + 6469 TTI.getGatherScatterOpCost( 6470 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6471 TargetTransformInfo::TCK_RecipThroughput, I); 6472 } 6473 6474 InstructionCost 6475 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6476 ElementCount VF) { 6477 // TODO: Once we have support for interleaving with scalable vectors 6478 // we can calculate the cost properly here. 6479 if (VF.isScalable()) 6480 return InstructionCost::getInvalid(); 6481 6482 Type *ValTy = getLoadStoreType(I); 6483 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6484 unsigned AS = getLoadStoreAddressSpace(I); 6485 6486 auto Group = getInterleavedAccessGroup(I); 6487 assert(Group && "Fail to get an interleaved access group."); 6488 6489 unsigned InterleaveFactor = Group->getFactor(); 6490 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6491 6492 // Holds the indices of existing members in the interleaved group. 6493 SmallVector<unsigned, 4> Indices; 6494 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6495 if (Group->getMember(IF)) 6496 Indices.push_back(IF); 6497 6498 // Calculate the cost of the whole interleaved group. 6499 bool UseMaskForGaps = 6500 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6501 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6502 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6503 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6504 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6505 6506 if (Group->isReverse()) { 6507 // TODO: Add support for reversed masked interleaved access. 6508 assert(!Legal->isMaskRequired(I) && 6509 "Reverse masked interleaved access not supported."); 6510 Cost += 6511 Group->getNumMembers() * 6512 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6513 } 6514 return Cost; 6515 } 6516 6517 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6518 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6519 using namespace llvm::PatternMatch; 6520 // Early exit for no inloop reductions 6521 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6522 return None; 6523 auto *VectorTy = cast<VectorType>(Ty); 6524 6525 // We are looking for a pattern of, and finding the minimal acceptable cost: 6526 // reduce(mul(ext(A), ext(B))) or 6527 // reduce(mul(A, B)) or 6528 // reduce(ext(A)) or 6529 // reduce(A). 6530 // The basic idea is that we walk down the tree to do that, finding the root 6531 // reduction instruction in InLoopReductionImmediateChains. 
From there we find
6532 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6533 // of the components. If the reduction cost is lower, then we return it for the
6534 // reduction instruction and 0 for the other instructions in the pattern. If
6535 // it is not, we return an invalid cost specifying that the original cost method
6536 // should be used.
6537 Instruction *RetI = I;
6538 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6539 if (!RetI->hasOneUser())
6540 return None;
6541 RetI = RetI->user_back();
6542 }
6543 if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6544 RetI->user_back()->getOpcode() == Instruction::Add) {
6545 if (!RetI->hasOneUser())
6546 return None;
6547 RetI = RetI->user_back();
6548 }
6549
6550 // Test if the found instruction is a reduction, and if not return an invalid
6551 // cost specifying that the parent should use the original cost modelling.
6552 if (!InLoopReductionImmediateChains.count(RetI))
6553 return None;
6554
6555 // Find the reduction this chain is a part of and calculate the basic cost of
6556 // the reduction on its own.
6557 Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6558 Instruction *ReductionPhi = LastChain;
6559 while (!isa<PHINode>(ReductionPhi))
6560 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6561
6562 const RecurrenceDescriptor &RdxDesc =
6563 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6564
6565 InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6566 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6567
6568 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6569 // normal fmul instruction to the cost of the fadd reduction.
6570 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6571 BaseCost +=
6572 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6573
6574 // If we're using ordered reductions then we can just return the base cost
6575 // here, since getArithmeticReductionCost calculates the full ordered
6576 // reduction cost when FP reassociation is not allowed.
6577 if (useOrderedReductions(RdxDesc))
6578 return BaseCost;
6579
6580 // Get the operand that was not the reduction chain and match it to one of the
6581 // patterns, returning the better cost if it is found.
6582 Instruction *RedOp = RetI->getOperand(1) == LastChain
6583 ? dyn_cast<Instruction>(RetI->getOperand(0))
6584 : dyn_cast<Instruction>(RetI->getOperand(1));
6585
6586 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6587
6588 Instruction *Op0, *Op1;
6589 if (RedOp &&
6590 match(RedOp,
6591 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6592 match(Op0, m_ZExtOrSExt(m_Value())) &&
6593 Op0->getOpcode() == Op1->getOpcode() &&
6594 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6595 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6596 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6597
6598 // Matched reduce(ext(mul(ext(A), ext(B))))
6599 // Note that the extend opcodes need to all match, or if A==B they will have
6600 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6601 // which is equally fine.
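// A sketch of the kind of IR this case is intended to cover (hypothetical
// types and width; the real ones depend on the loop):
//   %ea = sext <16 x i8> %a to <16 x i16>
//   %eb = sext <16 x i8> %b to <16 x i16>
//   %m  = mul <16 x i16> %ea, %eb
//   %me = sext <16 x i16> %m to <16 x i32>
// with %me feeding the in-loop add reduction. If the target reports an
// extended-add-reduction cost below the summed ext/mul/ext costs plus the
// base reduction cost, that cheaper cost is returned below.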
6602 bool IsUnsigned = isa<ZExtInst>(Op0); 6603 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6604 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6605 6606 InstructionCost ExtCost = 6607 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6608 TTI::CastContextHint::None, CostKind, Op0); 6609 InstructionCost MulCost = 6610 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6611 InstructionCost Ext2Cost = 6612 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6613 TTI::CastContextHint::None, CostKind, RedOp); 6614 6615 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6616 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6617 CostKind); 6618 6619 if (RedCost.isValid() && 6620 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6621 return I == RetI ? RedCost : 0; 6622 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6623 !TheLoop->isLoopInvariant(RedOp)) { 6624 // Matched reduce(ext(A)) 6625 bool IsUnsigned = isa<ZExtInst>(RedOp); 6626 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6627 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6628 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6629 CostKind); 6630 6631 InstructionCost ExtCost = 6632 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6633 TTI::CastContextHint::None, CostKind, RedOp); 6634 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6635 return I == RetI ? RedCost : 0; 6636 } else if (RedOp && 6637 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6638 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6639 Op0->getOpcode() == Op1->getOpcode() && 6640 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6641 bool IsUnsigned = isa<ZExtInst>(Op0); 6642 Type *Op0Ty = Op0->getOperand(0)->getType(); 6643 Type *Op1Ty = Op1->getOperand(0)->getType(); 6644 Type *LargestOpTy = 6645 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6646 : Op0Ty; 6647 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6648 6649 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6650 // different sizes. We take the largest type as the ext to reduce, and add 6651 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6652 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6653 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6654 TTI::CastContextHint::None, CostKind, Op0); 6655 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6656 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6657 TTI::CastContextHint::None, CostKind, Op1); 6658 InstructionCost MulCost = 6659 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6660 6661 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6662 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6663 CostKind); 6664 InstructionCost ExtraExtCost = 0; 6665 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6666 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6667 ExtraExtCost = TTI.getCastInstrCost( 6668 ExtraExtOp->getOpcode(), ExtType, 6669 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6670 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6671 } 6672 6673 if (RedCost.isValid() && 6674 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6675 return I == RetI ? 
RedCost : 0; 6676 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6677 // Matched reduce(mul()) 6678 InstructionCost MulCost = 6679 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6680 6681 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6682 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6683 CostKind); 6684 6685 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6686 return I == RetI ? RedCost : 0; 6687 } 6688 } 6689 6690 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6691 } 6692 6693 InstructionCost 6694 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6695 ElementCount VF) { 6696 // Calculate scalar cost only. Vectorization cost should be ready at this 6697 // moment. 6698 if (VF.isScalar()) { 6699 Type *ValTy = getLoadStoreType(I); 6700 const Align Alignment = getLoadStoreAlignment(I); 6701 unsigned AS = getLoadStoreAddressSpace(I); 6702 6703 return TTI.getAddressComputationCost(ValTy) + 6704 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6705 TTI::TCK_RecipThroughput, I); 6706 } 6707 return getWideningCost(I, VF); 6708 } 6709 6710 LoopVectorizationCostModel::VectorizationCostTy 6711 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6712 ElementCount VF) { 6713 // If we know that this instruction will remain uniform, check the cost of 6714 // the scalar version. 6715 if (isUniformAfterVectorization(I, VF)) 6716 VF = ElementCount::getFixed(1); 6717 6718 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6719 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6720 6721 // Forced scalars do not have any scalarization overhead. 6722 auto ForcedScalar = ForcedScalars.find(VF); 6723 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6724 auto InstSet = ForcedScalar->second; 6725 if (InstSet.count(I)) 6726 return VectorizationCostTy( 6727 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6728 VF.getKnownMinValue()), 6729 false); 6730 } 6731 6732 Type *VectorTy; 6733 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6734 6735 bool TypeNotScalarized = false; 6736 if (VF.isVector() && VectorTy->isVectorTy()) { 6737 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 6738 if (NumParts) 6739 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6740 else 6741 C = InstructionCost::getInvalid(); 6742 } 6743 return VectorizationCostTy(C, TypeNotScalarized); 6744 } 6745 6746 InstructionCost 6747 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6748 ElementCount VF) const { 6749 6750 // There is no mechanism yet to create a scalable scalarization loop, 6751 // so this is currently Invalid. 6752 if (VF.isScalable()) 6753 return InstructionCost::getInvalid(); 6754 6755 if (VF.isScalar()) 6756 return 0; 6757 6758 InstructionCost Cost = 0; 6759 Type *RetTy = ToVectorTy(I->getType(), VF); 6760 if (!RetTy->isVoidTy() && 6761 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6762 Cost += TTI.getScalarizationOverhead( 6763 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6764 false); 6765 6766 // Some targets keep addresses scalar. 6767 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6768 return Cost; 6769 6770 // Some targets support efficient element stores. 6771 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6772 return Cost; 6773 6774 // Collect operands to consider. 6775 CallInst *CI = dyn_cast<CallInst>(I); 6776 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 6777 6778 // Skip operands that do not require extraction/scalarization and do not incur 6779 // any overhead. 6780 SmallVector<Type *> Tys; 6781 for (auto *V : filterExtractingOperands(Ops, VF)) 6782 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6783 return Cost + TTI.getOperandsScalarizationOverhead( 6784 filterExtractingOperands(Ops, VF), Tys); 6785 } 6786 6787 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6788 if (VF.isScalar()) 6789 return; 6790 NumPredStores = 0; 6791 for (BasicBlock *BB : TheLoop->blocks()) { 6792 // For each instruction in the old loop. 6793 for (Instruction &I : *BB) { 6794 Value *Ptr = getLoadStorePointerOperand(&I); 6795 if (!Ptr) 6796 continue; 6797 6798 // TODO: We should generate better code and update the cost model for 6799 // predicated uniform stores. Today they are treated as any other 6800 // predicated store (see added test cases in 6801 // invariant-store-vectorization.ll). 6802 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6803 NumPredStores++; 6804 6805 if (Legal->isUniformMemOp(I)) { 6806 // TODO: Avoid replicating loads and stores instead of 6807 // relying on instcombine to remove them. 6808 // Load: Scalar load + broadcast 6809 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6810 InstructionCost Cost; 6811 if (isa<StoreInst>(&I) && VF.isScalable() && 6812 isLegalGatherOrScatter(&I, VF)) { 6813 Cost = getGatherScatterCost(&I, VF); 6814 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6815 } else { 6816 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 6817 "Cannot yet scalarize uniform stores"); 6818 Cost = getUniformMemOpCost(&I, VF); 6819 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6820 } 6821 continue; 6822 } 6823 6824 // We assume that widening is the best solution when possible. 6825 if (memoryInstructionCanBeWidened(&I, VF)) { 6826 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6827 int ConsecutiveStride = Legal->isConsecutivePtr( 6828 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6829 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6830 "Expected consecutive stride."); 6831 InstWidening Decision = 6832 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6833 setWideningDecision(&I, VF, Decision, Cost); 6834 continue; 6835 } 6836 6837 // Choose between Interleaving, Gather/Scatter or Scalarization. 6838 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6839 unsigned NumAccesses = 1; 6840 if (isAccessInterleaved(&I)) { 6841 auto Group = getInterleavedAccessGroup(&I); 6842 assert(Group && "Fail to get an interleaved access group."); 6843 6844 // Make one decision for the whole group. 6845 if (getWideningDecision(&I, VF) != CM_Unknown) 6846 continue; 6847 6848 NumAccesses = Group->getNumMembers(); 6849 if (interleavedAccessCanBeWidened(&I, VF)) 6850 InterleaveCost = getInterleaveGroupCost(&I, VF); 6851 } 6852 6853 InstructionCost GatherScatterCost = 6854 isLegalGatherOrScatter(&I, VF) 6855 ? getGatherScatterCost(&I, VF) * NumAccesses 6856 : InstructionCost::getInvalid(); 6857 6858 InstructionCost ScalarizationCost = 6859 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6860 6861 // Choose better solution for the current VF, 6862 // write down this decision and use it during vectorization. 
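// Note on the tie-breaking below: when the interleave cost equals the
// gather/scatter cost, interleaving is preferred (<=), while gather/scatter
// must be strictly cheaper than scalarization to win (<). Invalid costs
// compare as more expensive than any valid cost, so unsupported options are
// not selected here.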
6863 InstructionCost Cost; 6864 InstWidening Decision; 6865 if (InterleaveCost <= GatherScatterCost && 6866 InterleaveCost < ScalarizationCost) { 6867 Decision = CM_Interleave; 6868 Cost = InterleaveCost; 6869 } else if (GatherScatterCost < ScalarizationCost) { 6870 Decision = CM_GatherScatter; 6871 Cost = GatherScatterCost; 6872 } else { 6873 Decision = CM_Scalarize; 6874 Cost = ScalarizationCost; 6875 } 6876 // If the instructions belongs to an interleave group, the whole group 6877 // receives the same decision. The whole group receives the cost, but 6878 // the cost will actually be assigned to one instruction. 6879 if (auto Group = getInterleavedAccessGroup(&I)) 6880 setWideningDecision(Group, VF, Decision, Cost); 6881 else 6882 setWideningDecision(&I, VF, Decision, Cost); 6883 } 6884 } 6885 6886 // Make sure that any load of address and any other address computation 6887 // remains scalar unless there is gather/scatter support. This avoids 6888 // inevitable extracts into address registers, and also has the benefit of 6889 // activating LSR more, since that pass can't optimize vectorized 6890 // addresses. 6891 if (TTI.prefersVectorizedAddressing()) 6892 return; 6893 6894 // Start with all scalar pointer uses. 6895 SmallPtrSet<Instruction *, 8> AddrDefs; 6896 for (BasicBlock *BB : TheLoop->blocks()) 6897 for (Instruction &I : *BB) { 6898 Instruction *PtrDef = 6899 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6900 if (PtrDef && TheLoop->contains(PtrDef) && 6901 getWideningDecision(&I, VF) != CM_GatherScatter) 6902 AddrDefs.insert(PtrDef); 6903 } 6904 6905 // Add all instructions used to generate the addresses. 6906 SmallVector<Instruction *, 4> Worklist; 6907 append_range(Worklist, AddrDefs); 6908 while (!Worklist.empty()) { 6909 Instruction *I = Worklist.pop_back_val(); 6910 for (auto &Op : I->operands()) 6911 if (auto *InstOp = dyn_cast<Instruction>(Op)) 6912 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 6913 AddrDefs.insert(InstOp).second) 6914 Worklist.push_back(InstOp); 6915 } 6916 6917 for (auto *I : AddrDefs) { 6918 if (isa<LoadInst>(I)) { 6919 // Setting the desired widening decision should ideally be handled in 6920 // by cost functions, but since this involves the task of finding out 6921 // if the loaded register is involved in an address computation, it is 6922 // instead changed here when we know this is the case. 6923 InstWidening Decision = getWideningDecision(I, VF); 6924 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 6925 // Scalarize a widened load of address. 6926 setWideningDecision( 6927 I, VF, CM_Scalarize, 6928 (VF.getKnownMinValue() * 6929 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 6930 else if (auto Group = getInterleavedAccessGroup(I)) { 6931 // Scalarize an interleave group of address loads. 6932 for (unsigned I = 0; I < Group->getFactor(); ++I) { 6933 if (Instruction *Member = Group->getMember(I)) 6934 setWideningDecision( 6935 Member, VF, CM_Scalarize, 6936 (VF.getKnownMinValue() * 6937 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 6938 } 6939 } 6940 } else 6941 // Make sure I gets scalarized and a cost estimate without 6942 // scalarization overhead. 
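      // (Entries in ForcedScalars are costed as VF scalar copies without any
      // insert/extract overhead; see the ForcedScalars handling near the top of
      // getInstructionCost.)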
6943       ForcedScalars[VF].insert(I);
6944   }
6945 }
6946
6947 InstructionCost
6948 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6949                                                Type *&VectorTy) {
6950   Type *RetTy = I->getType();
6951   if (canTruncateToMinimalBitwidth(I, VF))
6952     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6953   auto SE = PSE.getSE();
6954   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6955
6956   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6957                                                 ElementCount VF) -> bool {
6958     if (VF.isScalar())
6959       return true;
6960
6961     auto Scalarized = InstsToScalarize.find(VF);
6962     assert(Scalarized != InstsToScalarize.end() &&
6963            "VF not yet analyzed for scalarization profitability");
6964     return !Scalarized->second.count(I) &&
6965            llvm::all_of(I->users(), [&](User *U) {
6966              auto *UI = cast<Instruction>(U);
6967              return !Scalarized->second.count(UI);
6968            });
6969   };
6970   (void) hasSingleCopyAfterVectorization;
6971
6972   if (isScalarAfterVectorization(I, VF)) {
6973     // With the exception of GEPs and PHIs, after scalarization there should
6974     // only be one copy of the instruction generated in the loop. This is
6975     // because the VF is either 1, or any instructions that need scalarizing
6976     // have already been dealt with by the time we get here. As a result,
6977     // it means we don't have to multiply the instruction cost by VF.
6978     assert(I->getOpcode() == Instruction::GetElementPtr ||
6979            I->getOpcode() == Instruction::PHI ||
6980            (I->getOpcode() == Instruction::BitCast &&
6981             I->getType()->isPointerTy()) ||
6982            hasSingleCopyAfterVectorization(I, VF));
6983     VectorTy = RetTy;
6984   } else
6985     VectorTy = ToVectorTy(RetTy, VF);
6986
6987   // TODO: We need to estimate the cost of intrinsic calls.
6988   switch (I->getOpcode()) {
6989   case Instruction::GetElementPtr:
6990     // We mark this instruction as zero-cost because the cost of GEPs in
6991     // vectorized code depends on whether the corresponding memory instruction
6992     // is scalarized or not. Therefore, we handle GEPs with the memory
6993     // instruction cost.
6994     return 0;
6995   case Instruction::Br: {
6996     // In cases of scalarized and predicated instructions, there will be VF
6997     // predicated blocks in the vectorized loop. Each branch around these
6998     // blocks also requires an extract of its vector compare i1 element.
6999     bool ScalarPredicatedBB = false;
7000     BranchInst *BI = cast<BranchInst>(I);
7001     if (VF.isVector() && BI->isConditional() &&
7002         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7003          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7004       ScalarPredicatedBB = true;
7005
7006     if (ScalarPredicatedBB) {
7007       // Not possible to scalarize a scalable vector with predicated instructions.
7008       if (VF.isScalable())
7009         return InstructionCost::getInvalid();
7010       // Return cost for branches around scalarized and predicated blocks.
7011       auto *Vec_i1Ty =
7012           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7013       return (
7014           TTI.getScalarizationOverhead(
7015               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7016           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7017     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7018       // The back-edge branch will remain, as will all scalar branches.
7019       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7020     else
7021       // This branch will be eliminated by if-conversion.
7022 return 0; 7023 // Note: We currently assume zero cost for an unconditional branch inside 7024 // a predicated block since it will become a fall-through, although we 7025 // may decide in the future to call TTI for all branches. 7026 } 7027 case Instruction::PHI: { 7028 auto *Phi = cast<PHINode>(I); 7029 7030 // First-order recurrences are replaced by vector shuffles inside the loop. 7031 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7032 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7033 return TTI.getShuffleCost( 7034 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7035 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7036 7037 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7038 // converted into select instructions. We require N - 1 selects per phi 7039 // node, where N is the number of incoming values. 7040 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7041 return (Phi->getNumIncomingValues() - 1) * 7042 TTI.getCmpSelInstrCost( 7043 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7044 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7045 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7046 7047 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7048 } 7049 case Instruction::UDiv: 7050 case Instruction::SDiv: 7051 case Instruction::URem: 7052 case Instruction::SRem: 7053 // If we have a predicated instruction, it may not be executed for each 7054 // vector lane. Get the scalarization cost and scale this amount by the 7055 // probability of executing the predicated block. If the instruction is not 7056 // predicated, we fall through to the next case. 7057 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7058 InstructionCost Cost = 0; 7059 7060 // These instructions have a non-void type, so account for the phi nodes 7061 // that we will create. This cost is likely to be zero. The phi node 7062 // cost, if any, should be scaled by the block probability because it 7063 // models a copy at the end of each predicated block. 7064 Cost += VF.getKnownMinValue() * 7065 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7066 7067 // The cost of the non-predicated instruction. 7068 Cost += VF.getKnownMinValue() * 7069 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7070 7071 // The cost of insertelement and extractelement instructions needed for 7072 // scalarization. 7073 Cost += getScalarizationOverhead(I, VF); 7074 7075 // Scale the cost by the probability of executing the predicated blocks. 7076 // This assumes the predicated block for each vector lane is equally 7077 // likely. 7078 return Cost / getReciprocalPredBlockProb(); 7079 } 7080 LLVM_FALLTHROUGH; 7081 case Instruction::Add: 7082 case Instruction::FAdd: 7083 case Instruction::Sub: 7084 case Instruction::FSub: 7085 case Instruction::Mul: 7086 case Instruction::FMul: 7087 case Instruction::FDiv: 7088 case Instruction::FRem: 7089 case Instruction::Shl: 7090 case Instruction::LShr: 7091 case Instruction::AShr: 7092 case Instruction::And: 7093 case Instruction::Or: 7094 case Instruction::Xor: { 7095 // Since we will replace the stride by 1 the multiplication should go away. 
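    // (Illustrative: when a symbolic stride is specialized to 1 by loop
    // versioning, an index computation such as 'i * Stride' simplifies to 'i',
    // so the multiply costs nothing in the vectorized loop.)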
7096 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7097 return 0; 7098 7099 // Detect reduction patterns 7100 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7101 return *RedCost; 7102 7103 // Certain instructions can be cheaper to vectorize if they have a constant 7104 // second vector operand. One example of this are shifts on x86. 7105 Value *Op2 = I->getOperand(1); 7106 TargetTransformInfo::OperandValueProperties Op2VP; 7107 TargetTransformInfo::OperandValueKind Op2VK = 7108 TTI.getOperandInfo(Op2, Op2VP); 7109 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7110 Op2VK = TargetTransformInfo::OK_UniformValue; 7111 7112 SmallVector<const Value *, 4> Operands(I->operand_values()); 7113 return TTI.getArithmeticInstrCost( 7114 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7115 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7116 } 7117 case Instruction::FNeg: { 7118 return TTI.getArithmeticInstrCost( 7119 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7120 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7121 TargetTransformInfo::OP_None, I->getOperand(0), I); 7122 } 7123 case Instruction::Select: { 7124 SelectInst *SI = cast<SelectInst>(I); 7125 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7126 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7127 7128 const Value *Op0, *Op1; 7129 using namespace llvm::PatternMatch; 7130 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7131 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7132 // select x, y, false --> x & y 7133 // select x, true, y --> x | y 7134 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7135 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7136 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7137 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7138 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7139 Op1->getType()->getScalarSizeInBits() == 1); 7140 7141 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7142 return TTI.getArithmeticInstrCost( 7143 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7144 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7145 } 7146 7147 Type *CondTy = SI->getCondition()->getType(); 7148 if (!ScalarCond) 7149 CondTy = VectorType::get(CondTy, VF); 7150 7151 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7152 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7153 Pred = Cmp->getPredicate(); 7154 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7155 CostKind, I); 7156 } 7157 case Instruction::ICmp: 7158 case Instruction::FCmp: { 7159 Type *ValTy = I->getOperand(0)->getType(); 7160 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7161 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7162 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7163 VectorTy = ToVectorTy(ValTy, VF); 7164 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7165 cast<CmpInst>(I)->getPredicate(), CostKind, 7166 I); 7167 } 7168 case Instruction::Store: 7169 case Instruction::Load: { 7170 ElementCount Width = VF; 7171 if (Width.isVector()) { 7172 InstWidening Decision = getWideningDecision(I, Width); 7173 assert(Decision != CM_Unknown && 7174 "CM decision should be taken at this point"); 7175 if (Decision == CM_Scalarize) 7176 Width = ElementCount::getFixed(1); 7177 } 7178 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7179 return getMemoryInstructionCost(I, VF); 7180 } 7181 case Instruction::BitCast: 7182 if (I->getType()->isPointerTy()) 7183 return 0; 7184 LLVM_FALLTHROUGH; 7185 case Instruction::ZExt: 7186 case Instruction::SExt: 7187 case Instruction::FPToUI: 7188 case Instruction::FPToSI: 7189 case Instruction::FPExt: 7190 case Instruction::PtrToInt: 7191 case Instruction::IntToPtr: 7192 case Instruction::SIToFP: 7193 case Instruction::UIToFP: 7194 case Instruction::Trunc: 7195 case Instruction::FPTrunc: { 7196 // Computes the CastContextHint from a Load/Store instruction. 7197 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7198 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7199 "Expected a load or a store!"); 7200 7201 if (VF.isScalar() || !TheLoop->contains(I)) 7202 return TTI::CastContextHint::Normal; 7203 7204 switch (getWideningDecision(I, VF)) { 7205 case LoopVectorizationCostModel::CM_GatherScatter: 7206 return TTI::CastContextHint::GatherScatter; 7207 case LoopVectorizationCostModel::CM_Interleave: 7208 return TTI::CastContextHint::Interleave; 7209 case LoopVectorizationCostModel::CM_Scalarize: 7210 case LoopVectorizationCostModel::CM_Widen: 7211 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7212 : TTI::CastContextHint::Normal; 7213 case LoopVectorizationCostModel::CM_Widen_Reverse: 7214 return TTI::CastContextHint::Reversed; 7215 case LoopVectorizationCostModel::CM_Unknown: 7216 llvm_unreachable("Instr did not go through cost modelling?"); 7217 } 7218 7219 llvm_unreachable("Unhandled case!"); 7220 }; 7221 7222 unsigned Opcode = I->getOpcode(); 7223 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7224 // For Trunc, the context is the only user, which must be a StoreInst. 7225 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7226 if (I->hasOneUse()) 7227 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7228 CCH = ComputeCCH(Store); 7229 } 7230 // For Z/Sext, the context is the operand, which must be a LoadInst. 
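    // E.g. for '%e = zext i8 %l to i32' where '%l' is a load widened in
    // reverse order, ComputeCCH returns CastContextHint::Reversed
    // (illustrative example).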
7231 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7232 Opcode == Instruction::FPExt) { 7233 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7234 CCH = ComputeCCH(Load); 7235 } 7236 7237 // We optimize the truncation of induction variables having constant 7238 // integer steps. The cost of these truncations is the same as the scalar 7239 // operation. 7240 if (isOptimizableIVTruncate(I, VF)) { 7241 auto *Trunc = cast<TruncInst>(I); 7242 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7243 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7244 } 7245 7246 // Detect reduction patterns 7247 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7248 return *RedCost; 7249 7250 Type *SrcScalarTy = I->getOperand(0)->getType(); 7251 Type *SrcVecTy = 7252 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7253 if (canTruncateToMinimalBitwidth(I, VF)) { 7254 // This cast is going to be shrunk. This may remove the cast or it might 7255 // turn it into slightly different cast. For example, if MinBW == 16, 7256 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7257 // 7258 // Calculate the modified src and dest types. 7259 Type *MinVecTy = VectorTy; 7260 if (Opcode == Instruction::Trunc) { 7261 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7262 VectorTy = 7263 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7264 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7265 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7266 VectorTy = 7267 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7268 } 7269 } 7270 7271 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7272 } 7273 case Instruction::Call: { 7274 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7275 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7276 return *RedCost; 7277 bool NeedToScalarize; 7278 CallInst *CI = cast<CallInst>(I); 7279 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7280 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7281 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7282 return std::min(CallCost, IntrinsicCost); 7283 } 7284 return CallCost; 7285 } 7286 case Instruction::ExtractValue: 7287 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7288 case Instruction::Alloca: 7289 // We cannot easily widen alloca to a scalable alloca, as 7290 // the result would need to be a vector of pointers. 7291 if (VF.isScalable()) 7292 return InstructionCost::getInvalid(); 7293 LLVM_FALLTHROUGH; 7294 default: 7295 // This opcode is unknown. Assume that it is the same as 'mul'. 7296 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7297 } // end of switch. 
7298 } 7299 7300 char LoopVectorize::ID = 0; 7301 7302 static const char lv_name[] = "Loop Vectorization"; 7303 7304 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7305 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7306 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7307 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7308 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7309 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7310 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7311 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7312 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7313 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7314 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7315 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7316 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7317 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7318 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7319 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7320 7321 namespace llvm { 7322 7323 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7324 7325 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7326 bool VectorizeOnlyWhenForced) { 7327 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7328 } 7329 7330 } // end namespace llvm 7331 7332 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7333 // Check if the pointer operand of a load or store instruction is 7334 // consecutive. 7335 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7336 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7337 return false; 7338 } 7339 7340 void LoopVectorizationCostModel::collectValuesToIgnore() { 7341 // Ignore ephemeral values. 7342 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7343 7344 // Ignore type-promoting instructions we identified during reduction 7345 // detection. 7346 for (auto &Reduction : Legal->getReductionVars()) { 7347 const RecurrenceDescriptor &RedDes = Reduction.second; 7348 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7349 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7350 } 7351 // Ignore type-casting instructions we identified during induction 7352 // detection. 7353 for (auto &Induction : Legal->getInductionVars()) { 7354 const InductionDescriptor &IndDes = Induction.second; 7355 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7356 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7357 } 7358 } 7359 7360 void LoopVectorizationCostModel::collectInLoopReductions() { 7361 for (auto &Reduction : Legal->getReductionVars()) { 7362 PHINode *Phi = Reduction.first; 7363 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7364 7365 // We don't collect reductions that are type promoted (yet). 7366 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7367 continue; 7368 7369 // If the target would prefer this reduction to happen "in-loop", then we 7370 // want to record it as such. 7371 unsigned Opcode = RdxDesc.getOpcode(); 7372 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7373 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7374 TargetTransformInfo::ReductionFlags())) 7375 continue; 7376 7377 // Check that we can correctly put the reductions into the loop, by 7378 // finding the chain of operations that leads from the phi to the loop 7379 // exit value. 
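    // For a plain integer sum the chain is just the single add feeding the
    // loop-exit value, e.g. '%sum.next = add i32 %sum.phi, %val' (illustrative);
    // if no such chain is found the reduction stays out-of-loop.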
7380 SmallVector<Instruction *, 4> ReductionOperations = 7381 RdxDesc.getReductionOpChain(Phi, TheLoop); 7382 bool InLoop = !ReductionOperations.empty(); 7383 if (InLoop) { 7384 InLoopReductionChains[Phi] = ReductionOperations; 7385 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7386 Instruction *LastChain = Phi; 7387 for (auto *I : ReductionOperations) { 7388 InLoopReductionImmediateChains[I] = LastChain; 7389 LastChain = I; 7390 } 7391 } 7392 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7393 << " reduction for phi: " << *Phi << "\n"); 7394 } 7395 } 7396 7397 // TODO: we could return a pair of values that specify the max VF and 7398 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7399 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7400 // doesn't have a cost model that can choose which plan to execute if 7401 // more than one is generated. 7402 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7403 LoopVectorizationCostModel &CM) { 7404 unsigned WidestType; 7405 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7406 return WidestVectorRegBits / WidestType; 7407 } 7408 7409 VectorizationFactor 7410 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7411 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7412 ElementCount VF = UserVF; 7413 // Outer loop handling: They may require CFG and instruction level 7414 // transformations before even evaluating whether vectorization is profitable. 7415 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7416 // the vectorization pipeline. 7417 if (!OrigLoop->isInnermost()) { 7418 // If the user doesn't provide a vectorization factor, determine a 7419 // reasonable one. 7420 if (UserVF.isZero()) { 7421 VF = ElementCount::getFixed(determineVPlanVF( 7422 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7423 .getFixedSize(), 7424 CM)); 7425 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7426 7427 // Make sure we have a VF > 1 for stress testing. 7428 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7429 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7430 << "overriding computed VF.\n"); 7431 VF = ElementCount::getFixed(4); 7432 } 7433 } 7434 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7435 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7436 "VF needs to be a power of two"); 7437 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7438 << "VF " << VF << " to build VPlans.\n"); 7439 buildVPlans(VF, VF); 7440 7441 // For VPlan build stress testing, we bail out after VPlan construction. 7442 if (VPlanBuildStressTest) 7443 return VectorizationFactor::Disabled(); 7444 7445 return {VF, 0 /*Cost*/}; 7446 } 7447 7448 LLVM_DEBUG( 7449 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7450 "VPlan-native path.\n"); 7451 return VectorizationFactor::Disabled(); 7452 } 7453 7454 Optional<VectorizationFactor> 7455 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7456 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7457 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7458 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7459 return None; 7460 7461 // Invalidate interleave groups if all blocks of loop will be predicated. 
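  // A predicated header implies the tail is folded by masking, so each member
  // of an interleave group would need a masked wide load or store; without
  // masked-interleaved support the groups (and any decisions based on them)
  // have to be dropped.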
7462 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7463 !useMaskedInterleavedAccesses(*TTI)) { 7464 LLVM_DEBUG( 7465 dbgs() 7466 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7467 "which requires masked-interleaved support.\n"); 7468 if (CM.InterleaveInfo.invalidateGroups()) 7469 // Invalidating interleave groups also requires invalidating all decisions 7470 // based on them, which includes widening decisions and uniform and scalar 7471 // values. 7472 CM.invalidateCostModelingDecisions(); 7473 } 7474 7475 ElementCount MaxUserVF = 7476 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7477 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7478 if (!UserVF.isZero() && UserVFIsLegal) { 7479 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7480 "VF needs to be a power of two"); 7481 // Collect the instructions (and their associated costs) that will be more 7482 // profitable to scalarize. 7483 if (CM.selectUserVectorizationFactor(UserVF)) { 7484 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7485 CM.collectInLoopReductions(); 7486 buildVPlansWithVPRecipes(UserVF, UserVF); 7487 LLVM_DEBUG(printPlans(dbgs())); 7488 return {{UserVF, 0}}; 7489 } else 7490 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7491 "InvalidCost", ORE, OrigLoop); 7492 } 7493 7494 // Populate the set of Vectorization Factor Candidates. 7495 ElementCountSet VFCandidates; 7496 for (auto VF = ElementCount::getFixed(1); 7497 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7498 VFCandidates.insert(VF); 7499 for (auto VF = ElementCount::getScalable(1); 7500 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7501 VFCandidates.insert(VF); 7502 7503 for (const auto &VF : VFCandidates) { 7504 // Collect Uniform and Scalar instructions after vectorization with VF. 7505 CM.collectUniformsAndScalars(VF); 7506 7507 // Collect the instructions (and their associated costs) that will be more 7508 // profitable to scalarize. 7509 if (VF.isVector()) 7510 CM.collectInstsToScalarize(VF); 7511 } 7512 7513 CM.collectInLoopReductions(); 7514 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7515 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7516 7517 LLVM_DEBUG(printPlans(dbgs())); 7518 if (!MaxFactors.hasVector()) 7519 return VectorizationFactor::Disabled(); 7520 7521 // Select the optimal vectorization factor. 7522 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7523 7524 // Check if it is profitable to vectorize with runtime checks. 
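  // Runtime pointer checks are not free: past a threshold they can outweigh
  // the benefit of vectorizing, and the limit is stricter when the user has
  // not explicitly allowed memory-operation reordering (see the thresholds
  // used below).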
7525 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7526 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7527 bool PragmaThresholdReached = 7528 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7529 bool ThresholdReached = 7530 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7531 if ((ThresholdReached && !Hints.allowReordering()) || 7532 PragmaThresholdReached) { 7533 ORE->emit([&]() { 7534 return OptimizationRemarkAnalysisAliasing( 7535 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7536 OrigLoop->getHeader()) 7537 << "loop not vectorized: cannot prove it is safe to reorder " 7538 "memory operations"; 7539 }); 7540 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7541 Hints.emitRemarkWithHints(); 7542 return VectorizationFactor::Disabled(); 7543 } 7544 } 7545 return SelectedVF; 7546 } 7547 7548 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7549 assert(count_if(VPlans, 7550 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7551 1 && 7552 "Best VF has not a single VPlan."); 7553 7554 for (const VPlanPtr &Plan : VPlans) { 7555 if (Plan->hasVF(VF)) 7556 return *Plan.get(); 7557 } 7558 llvm_unreachable("No plan found!"); 7559 } 7560 7561 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7562 SmallVector<Metadata *, 4> MDs; 7563 // Reserve first location for self reference to the LoopID metadata node. 7564 MDs.push_back(nullptr); 7565 bool IsUnrollMetadata = false; 7566 MDNode *LoopID = L->getLoopID(); 7567 if (LoopID) { 7568 // First find existing loop unrolling disable metadata. 7569 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7570 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7571 if (MD) { 7572 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7573 IsUnrollMetadata = 7574 S && S->getString().startswith("llvm.loop.unroll.disable"); 7575 } 7576 MDs.push_back(LoopID->getOperand(i)); 7577 } 7578 } 7579 7580 if (!IsUnrollMetadata) { 7581 // Add runtime unroll disable metadata. 7582 LLVMContext &Context = L->getHeader()->getContext(); 7583 SmallVector<Metadata *, 1> DisableOperands; 7584 DisableOperands.push_back( 7585 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7586 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7587 MDs.push_back(DisableNode); 7588 MDNode *NewLoopID = MDNode::get(Context, MDs); 7589 // Set operand 0 to refer to the loop id itself. 7590 NewLoopID->replaceOperandWith(0, NewLoopID); 7591 L->setLoopID(NewLoopID); 7592 } 7593 } 7594 7595 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7596 VPlan &BestVPlan, 7597 InnerLoopVectorizer &ILV, 7598 DominatorTree *DT) { 7599 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7600 << '\n'); 7601 7602 // Perform the actual loop transformation. 7603 7604 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7605 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7606 Value *CanonicalIVStartValue; 7607 std::tie(State.CFG.VectorPreHeader, CanonicalIVStartValue) = 7608 ILV.createVectorizedLoopSkeleton(); 7609 ILV.collectPoisonGeneratingRecipes(State); 7610 7611 ILV.printDebugTracesAtStart(); 7612 7613 //===------------------------------------------------===// 7614 // 7615 // Notice: any optimization or new instruction that go 7616 // into the code below should also be implemented in 7617 // the cost-model. 
7618 // 7619 //===------------------------------------------------===// 7620 7621 // 2. Copy and widen instructions from the old loop into the new loop. 7622 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), 7623 ILV.getOrCreateVectorTripCount(nullptr), 7624 CanonicalIVStartValue, State); 7625 BestVPlan.execute(&State); 7626 7627 // Keep all loop hints from the original loop on the vector loop (we'll 7628 // replace the vectorizer-specific hints below). 7629 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7630 7631 Optional<MDNode *> VectorizedLoopID = 7632 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7633 LLVMLoopVectorizeFollowupVectorized}); 7634 7635 Loop *L = LI->getLoopFor(State.CFG.PrevBB); 7636 if (VectorizedLoopID.hasValue()) 7637 L->setLoopID(VectorizedLoopID.getValue()); 7638 else { 7639 // Keep all loop hints from the original loop on the vector loop (we'll 7640 // replace the vectorizer-specific hints below). 7641 if (MDNode *LID = OrigLoop->getLoopID()) 7642 L->setLoopID(LID); 7643 7644 LoopVectorizeHints Hints(L, true, *ORE); 7645 Hints.setAlreadyVectorized(); 7646 } 7647 // Disable runtime unrolling when vectorizing the epilogue loop. 7648 if (CanonicalIVStartValue) 7649 AddRuntimeUnrollDisableMetaData(L); 7650 7651 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7652 // predication, updating analyses. 7653 ILV.fixVectorizedLoop(State); 7654 7655 ILV.printDebugTracesAtEnd(); 7656 } 7657 7658 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 7659 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 7660 for (const auto &Plan : VPlans) 7661 if (PrintVPlansInDotFormat) 7662 Plan->printDOT(O); 7663 else 7664 Plan->print(O); 7665 } 7666 #endif 7667 7668 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7669 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7670 7671 // We create new control-flow for the vectorized loop, so the original exit 7672 // conditions will be dead after vectorization if it's only used by the 7673 // terminator 7674 SmallVector<BasicBlock*> ExitingBlocks; 7675 OrigLoop->getExitingBlocks(ExitingBlocks); 7676 for (auto *BB : ExitingBlocks) { 7677 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7678 if (!Cmp || !Cmp->hasOneUse()) 7679 continue; 7680 7681 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7682 if (!DeadInstructions.insert(Cmp).second) 7683 continue; 7684 7685 // The operands of the icmp is often a dead trunc, used by IndUpdate. 7686 // TODO: can recurse through operands in general 7687 for (Value *Op : Cmp->operands()) { 7688 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7689 DeadInstructions.insert(cast<Instruction>(Op)); 7690 } 7691 } 7692 7693 // We create new "steps" for induction variable updates to which the original 7694 // induction variables map. An original update instruction will be dead if 7695 // all its users except the induction variable are dead. 7696 auto *Latch = OrigLoop->getLoopLatch(); 7697 for (auto &Induction : Legal->getInductionVars()) { 7698 PHINode *Ind = Induction.first; 7699 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7700 7701 // If the tail is to be folded by masking, the primary induction variable, 7702 // if exists, isn't dead: it will be used for masking. Don't kill it. 
7703 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7704 continue; 7705 7706 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7707 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7708 })) 7709 DeadInstructions.insert(IndUpdate); 7710 } 7711 } 7712 7713 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7714 7715 //===--------------------------------------------------------------------===// 7716 // EpilogueVectorizerMainLoop 7717 //===--------------------------------------------------------------------===// 7718 7719 /// This function is partially responsible for generating the control flow 7720 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7721 std::pair<BasicBlock *, Value *> 7722 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7723 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7724 7725 // Workaround! Compute the trip count of the original loop and cache it 7726 // before we start modifying the CFG. This code has a systemic problem 7727 // wherein it tries to run analysis over partially constructed IR; this is 7728 // wrong, and not simply for SCEV. The trip count of the original loop 7729 // simply happens to be prone to hitting this in practice. In theory, we 7730 // can hit the same issue for any SCEV, or ValueTracking query done during 7731 // mutation. See PR49900. 7732 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 7733 createVectorLoopSkeleton(""); 7734 7735 // Generate the code to check the minimum iteration count of the vector 7736 // epilogue (see below). 7737 EPI.EpilogueIterationCountCheck = 7738 emitMinimumIterationCountCheck(LoopScalarPreHeader, true); 7739 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7740 7741 // Generate the code to check any assumptions that we've made for SCEV 7742 // expressions. 7743 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7744 7745 // Generate the code that checks at runtime if arrays overlap. We put the 7746 // checks into a separate block to make the more common case of few elements 7747 // faster. 7748 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7749 7750 // Generate the iteration count check for the main loop, *after* the check 7751 // for the epilogue loop, so that the path-length is shorter for the case 7752 // that goes directly through the vector epilogue. The longer-path length for 7753 // the main loop is compensated for, by the gain from vectorizing the larger 7754 // trip count. Note: the branch will get updated later on when we vectorize 7755 // the epilogue. 7756 EPI.MainLoopIterationCountCheck = 7757 emitMinimumIterationCountCheck(LoopScalarPreHeader, false); 7758 7759 // Generate the induction variable. 7760 Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader); 7761 EPI.VectorTripCount = CountRoundDown; 7762 7763 // Skip induction resume value creation here because they will be created in 7764 // the second pass. If we created them here, they wouldn't be used anyway, 7765 // because the vplan in the second pass still contains the inductions from the 7766 // original loop. 
7767 7768 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7769 } 7770 7771 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7772 LLVM_DEBUG({ 7773 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7774 << "Main Loop VF:" << EPI.MainLoopVF 7775 << ", Main Loop UF:" << EPI.MainLoopUF 7776 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7777 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7778 }); 7779 } 7780 7781 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7782 DEBUG_WITH_TYPE(VerboseDebug, { 7783 dbgs() << "intermediate fn:\n" 7784 << *OrigLoop->getHeader()->getParent() << "\n"; 7785 }); 7786 } 7787 7788 BasicBlock * 7789 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass, 7790 bool ForEpilogue) { 7791 assert(Bypass && "Expected valid bypass basic block."); 7792 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 7793 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7794 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7795 // Reuse existing vector loop preheader for TC checks. 7796 // Note that new preheader block is generated for vector loop. 7797 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7798 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7799 7800 // Generate code to check if the loop's trip count is less than VF * UF of the 7801 // main vector loop. 7802 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7803 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7804 7805 Value *CheckMinIters = Builder.CreateICmp( 7806 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7807 "min.iters.check"); 7808 7809 if (!ForEpilogue) 7810 TCCheckBlock->setName("vector.main.loop.iter.check"); 7811 7812 // Create new preheader for vector loop. 7813 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7814 DT, LI, nullptr, "vector.ph"); 7815 7816 if (ForEpilogue) { 7817 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7818 DT->getNode(Bypass)->getIDom()) && 7819 "TC check is expected to dominate Bypass"); 7820 7821 // Update dominator for Bypass & LoopExit. 7822 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7823 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7824 // For loops with multiple exits, there's no edge from the middle block 7825 // to exit blocks (as the epilogue must run) and thus no need to update 7826 // the immediate dominator of the exit blocks. 7827 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7828 7829 LoopBypassBlocks.push_back(TCCheckBlock); 7830 7831 // Save the trip count so we don't have to regenerate it in the 7832 // vec.epilog.iter.check. This is safe to do because the trip count 7833 // generated here dominates the vector epilog iter check. 7834 EPI.TripCount = Count; 7835 } 7836 7837 ReplaceInstWithInst( 7838 TCCheckBlock->getTerminator(), 7839 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7840 7841 return TCCheckBlock; 7842 } 7843 7844 //===--------------------------------------------------------------------===// 7845 // EpilogueVectorizerEpilogueLoop 7846 //===--------------------------------------------------------------------===// 7847 7848 /// This function is partially responsible for generating the control flow 7849 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 
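/// In rough terms, this second pass renames the existing vector preheader to
/// vec.epilog.iter.check, splits a fresh vec.epilog.ph preheader off it for
/// the epilogue vector loop, and rewires the bypass edges created by the
/// first pass so that skipping the epilogue branches to the scalar preheader
/// (sketch only; see the linked documentation for the full picture).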
7850 std::pair<BasicBlock *, Value *> 7851 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7852 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7853 createVectorLoopSkeleton("vec.epilog."); 7854 7855 // Now, compare the remaining count and if there aren't enough iterations to 7856 // execute the vectorized epilogue skip to the scalar part. 7857 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7858 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7859 LoopVectorPreHeader = 7860 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7861 LI, nullptr, "vec.epilog.ph"); 7862 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7863 VecEpilogueIterationCountCheck); 7864 7865 // Adjust the control flow taking the state info from the main loop 7866 // vectorization into account. 7867 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7868 "expected this to be saved from the previous pass."); 7869 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7870 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7871 7872 DT->changeImmediateDominator(LoopVectorPreHeader, 7873 EPI.MainLoopIterationCountCheck); 7874 7875 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7876 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7877 7878 if (EPI.SCEVSafetyCheck) 7879 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7880 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7881 if (EPI.MemSafetyCheck) 7882 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7883 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7884 7885 DT->changeImmediateDominator( 7886 VecEpilogueIterationCountCheck, 7887 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7888 7889 DT->changeImmediateDominator(LoopScalarPreHeader, 7890 EPI.EpilogueIterationCountCheck); 7891 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7892 // If there is an epilogue which must run, there's no edge from the 7893 // middle block to exit blocks and thus no need to update the immediate 7894 // dominator of the exit blocks. 7895 DT->changeImmediateDominator(LoopExitBlock, 7896 EPI.EpilogueIterationCountCheck); 7897 7898 // Keep track of bypass blocks, as they feed start values to the induction 7899 // phis in the scalar loop preheader. 7900 if (EPI.SCEVSafetyCheck) 7901 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7902 if (EPI.MemSafetyCheck) 7903 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7904 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7905 7906 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7907 // merge control-flow from the latch block and the middle block. Update the 7908 // incoming values here and move the Phi into the preheader. 
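  // (The phis are collected into a separate vector first so that moving them
  // out of the block does not invalidate the iteration over the block's phis.)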
7909   SmallVector<PHINode *, 4> PhisInBlock;
7910   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7911     PhisInBlock.push_back(&Phi);
7912
7913   for (PHINode *Phi : PhisInBlock) {
7914     Phi->replaceIncomingBlockWith(
7915         VecEpilogueIterationCountCheck->getSinglePredecessor(),
7916         VecEpilogueIterationCountCheck);
7917     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7918     if (EPI.SCEVSafetyCheck)
7919       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
7920     if (EPI.MemSafetyCheck)
7921       Phi->removeIncomingValue(EPI.MemSafetyCheck);
7922     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
7923   }
7924
7925   // Generate a resume induction for the vector epilogue and put it in the
7926   // vector epilogue preheader.
7927   Type *IdxTy = Legal->getWidestInductionType();
7928   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
7929                                          LoopVectorPreHeader->getFirstNonPHI());
7930   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
7931   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7932                            EPI.MainLoopIterationCountCheck);
7933
7934   // Generate induction resume values. These variables save the new starting
7935   // indexes for the scalar loop. They are used to test if there are any tail
7936   // iterations left once the vector loop has completed.
7937   // Note that when the vectorized epilogue is skipped due to the iteration
7938   // count check, the resume value for the induction variable comes from
7939   // the trip count of the main vector loop, hence passing the AdditionalBypass
7940   // argument.
7941   createInductionResumeValues({VecEpilogueIterationCountCheck,
7942                                EPI.VectorTripCount} /* AdditionalBypass */);
7943
7944   return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7945 }
7946
7947 BasicBlock *
7948 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7949     BasicBlock *Bypass, BasicBlock *Insert) {
7950
7951   assert(EPI.TripCount &&
7952          "Expected trip count to have been saved in the first pass.");
7953   assert(
7954       (!isa<Instruction>(EPI.TripCount) ||
7955        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7956       "saved trip count does not dominate insertion point.");
7957   Value *TC = EPI.TripCount;
7958   IRBuilder<> Builder(Insert->getTerminator());
7959   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7960
7961   // Generate code to check if the loop's trip count is less than VF * UF of the
7962   // vector epilogue loop.
7963   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7964 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7965 7966 Value *CheckMinIters = 7967 Builder.CreateICmp(P, Count, 7968 createStepForVF(Builder, Count->getType(), 7969 EPI.EpilogueVF, EPI.EpilogueUF), 7970 "min.epilog.iters.check"); 7971 7972 ReplaceInstWithInst( 7973 Insert->getTerminator(), 7974 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7975 7976 LoopBypassBlocks.push_back(Insert); 7977 return Insert; 7978 } 7979 7980 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 7981 LLVM_DEBUG({ 7982 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 7983 << "Epilogue Loop VF:" << EPI.EpilogueVF 7984 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7985 }); 7986 } 7987 7988 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 7989 DEBUG_WITH_TYPE(VerboseDebug, { 7990 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 7991 }); 7992 } 7993 7994 bool LoopVectorizationPlanner::getDecisionAndClampRange( 7995 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 7996 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 7997 bool PredicateAtRangeStart = Predicate(Range.Start); 7998 7999 for (ElementCount TmpVF = Range.Start * 2; 8000 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8001 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8002 Range.End = TmpVF; 8003 break; 8004 } 8005 8006 return PredicateAtRangeStart; 8007 } 8008 8009 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8010 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8011 /// of VF's starting at a given VF and extending it as much as possible. Each 8012 /// vectorization decision can potentially shorten this sub-range during 8013 /// buildVPlan(). 8014 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8015 ElementCount MaxVF) { 8016 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8017 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8018 VFRange SubRange = {VF, MaxVFPlusOne}; 8019 VPlans.push_back(buildVPlan(SubRange)); 8020 VF = SubRange.End; 8021 } 8022 } 8023 8024 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8025 VPlanPtr &Plan) { 8026 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8027 8028 // Look for cached value. 8029 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8030 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8031 if (ECEntryIt != EdgeMaskCache.end()) 8032 return ECEntryIt->second; 8033 8034 VPValue *SrcMask = createBlockInMask(Src, Plan); 8035 8036 // The terminator has to be a branch inst! 8037 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8038 assert(BI && "Unexpected terminator found"); 8039 8040 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8041 return EdgeMaskCache[Edge] = SrcMask; 8042 8043 // If source is an exiting block, we know the exit edge is dynamically dead 8044 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8045 // adding uses of an otherwise potentially dead instruction. 
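  // (Illustrative: the vector trip count is chosen so that the vector body
  // only executes iterations that do not leave the loop early; iterations
  // that would take this exit run in the scalar remainder or the folded tail
  // instead.)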
8046 if (OrigLoop->isLoopExiting(Src)) 8047 return EdgeMaskCache[Edge] = SrcMask; 8048 8049 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8050 assert(EdgeMask && "No Edge Mask found for condition"); 8051 8052 if (BI->getSuccessor(0) != Dst) 8053 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8054 8055 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8056 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8057 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8058 // The select version does not introduce new UB if SrcMask is false and 8059 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8060 VPValue *False = Plan->getOrAddVPValue( 8061 ConstantInt::getFalse(BI->getCondition()->getType())); 8062 EdgeMask = 8063 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8064 } 8065 8066 return EdgeMaskCache[Edge] = EdgeMask; 8067 } 8068 8069 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8070 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8071 8072 // Look for cached value. 8073 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8074 if (BCEntryIt != BlockMaskCache.end()) 8075 return BCEntryIt->second; 8076 8077 // All-one mask is modelled as no-mask following the convention for masked 8078 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8079 VPValue *BlockMask = nullptr; 8080 8081 if (OrigLoop->getHeader() == BB) { 8082 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8083 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8084 8085 // Introduce the early-exit compare IV <= BTC to form header block mask. 8086 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8087 // constructing the desired canonical IV in the header block as its first 8088 // non-phi instructions. 8089 assert(CM.foldTailByMasking() && "must fold the tail"); 8090 VPBasicBlock *HeaderVPBB = 8091 Plan->getVectorLoopRegion()->getEntryBasicBlock(); 8092 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8093 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8094 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8095 8096 VPBuilder::InsertPointGuard Guard(Builder); 8097 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8098 if (CM.TTI.emitGetActiveLaneMask()) { 8099 VPValue *TC = Plan->getOrCreateTripCount(); 8100 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8101 } else { 8102 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8103 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8104 } 8105 return BlockMaskCache[BB] = BlockMask; 8106 } 8107 8108 // This is the block mask. We OR all incoming edges. 8109 for (auto *Predecessor : predecessors(BB)) { 8110 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8111 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8112 return BlockMaskCache[BB] = EdgeMask; 8113 8114 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8115 BlockMask = EdgeMask; 8116 continue; 8117 } 8118 8119 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8120 } 8121 8122 return BlockMaskCache[BB] = BlockMask; 8123 } 8124 8125 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8126 ArrayRef<VPValue *> Operands, 8127 VFRange &Range, 8128 VPlanPtr &Plan) { 8129 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8130 "Must be called with either a load or store"); 8131 8132 auto willWiden = [&](ElementCount VF) -> bool { 8133 if (VF.isScalar()) 8134 return false; 8135 LoopVectorizationCostModel::InstWidening Decision = 8136 CM.getWideningDecision(I, VF); 8137 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8138 "CM decision should be taken at this point."); 8139 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8140 return true; 8141 if (CM.isScalarAfterVectorization(I, VF) || 8142 CM.isProfitableToScalarize(I, VF)) 8143 return false; 8144 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8145 }; 8146 8147 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8148 return nullptr; 8149 8150 VPValue *Mask = nullptr; 8151 if (Legal->isMaskRequired(I)) 8152 Mask = createBlockInMask(I->getParent(), Plan); 8153 8154 // Determine if the pointer operand of the access is either consecutive or 8155 // reverse consecutive. 8156 LoopVectorizationCostModel::InstWidening Decision = 8157 CM.getWideningDecision(I, Range.Start); 8158 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8159 bool Consecutive = 8160 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8161 8162 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8163 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8164 Consecutive, Reverse); 8165 8166 StoreInst *Store = cast<StoreInst>(I); 8167 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8168 Mask, Consecutive, Reverse); 8169 } 8170 8171 static VPWidenIntOrFpInductionRecipe * 8172 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8173 VPValue *Start, const InductionDescriptor &IndDesc, 8174 LoopVectorizationCostModel &CM, ScalarEvolution &SE, 8175 Loop &OrigLoop, VFRange &Range) { 8176 // Returns true if an instruction \p I should be scalarized instead of 8177 // vectorized for the chosen vectorization factor. 8178 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8179 return CM.isScalarAfterVectorization(I, VF) || 8180 CM.isProfitableToScalarize(I, VF); 8181 }; 8182 8183 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8184 [&](ElementCount VF) { 8185 // Returns true if we should generate a scalar version of \p IV. 
8186 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8187 return true; 8188 auto isScalarInst = [&](User *U) -> bool { 8189 auto *I = cast<Instruction>(U); 8190 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8191 }; 8192 return any_of(PhiOrTrunc->users(), isScalarInst); 8193 }, 8194 Range); 8195 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8196 [&](ElementCount VF) { 8197 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8198 }, 8199 Range); 8200 assert(IndDesc.getStartValue() == 8201 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8202 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8203 "step must be loop invariant"); 8204 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8205 return new VPWidenIntOrFpInductionRecipe( 8206 Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE); 8207 } 8208 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8209 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8210 !NeedsScalarIVOnly, SE); 8211 } 8212 8213 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8214 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8215 8216 // Check if this is an integer or fp induction. If so, build the recipe that 8217 // produces its scalar and vector values. 8218 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8219 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, 8220 *PSE.getSE(), *OrigLoop, Range); 8221 8222 // Check if this is pointer induction. If so, build the recipe for it. 8223 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8224 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8225 *PSE.getSE()); 8226 return nullptr; 8227 } 8228 8229 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8230 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8231 VPlan &Plan) const { 8232 // Optimize the special case where the source is a constant integer 8233 // induction variable. Notice that we can only optimize the 'trunc' case 8234 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8235 // (c) other casts depend on pointer size. 8236 8237 // Determine whether \p K is a truncation based on an induction variable that 8238 // can be optimized. 8239 auto isOptimizableIVTruncate = 8240 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8241 return [=](ElementCount VF) -> bool { 8242 return CM.isOptimizableIVTruncate(K, VF); 8243 }; 8244 }; 8245 8246 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8247 isOptimizableIVTruncate(I), Range)) { 8248 8249 auto *Phi = cast<PHINode>(I->getOperand(0)); 8250 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8251 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8252 return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(), 8253 *OrigLoop, Range); 8254 } 8255 return nullptr; 8256 } 8257 8258 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8259 ArrayRef<VPValue *> Operands, 8260 VPlanPtr &Plan) { 8261 // If all incoming values are equal, the incoming VPValue can be used directly 8262 // instead of creating a new VPBlendRecipe. 
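  // E.g. a phi such as 'phi i32 [ %x, %bb0 ], [ %x, %bb1 ]' blends the same
  // value on every edge, so it can simply forward %x (illustrative example).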
8263 VPValue *FirstIncoming = Operands[0]; 8264 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8265 return FirstIncoming == Inc; 8266 })) { 8267 return Operands[0]; 8268 } 8269 8270 unsigned NumIncoming = Phi->getNumIncomingValues(); 8271 // For in-loop reductions, we do not need to create an additional select. 8272 VPValue *InLoopVal = nullptr; 8273 for (unsigned In = 0; In < NumIncoming; In++) { 8274 PHINode *PhiOp = 8275 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8276 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8277 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8278 InLoopVal = Operands[In]; 8279 } 8280 } 8281 8282 assert((!InLoopVal || NumIncoming == 2) && 8283 "Found an in-loop reduction for PHI with unexpected number of " 8284 "incoming values"); 8285 if (InLoopVal) 8286 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8287 8288 // We know that all PHIs in non-header blocks are converted into selects, so 8289 // we don't have to worry about the insertion order and we can just use the 8290 // builder. At this point we generate the predication tree. There may be 8291 // duplications since this is a simple recursive scan, but future 8292 // optimizations will clean it up. 8293 SmallVector<VPValue *, 2> OperandsWithMask; 8294 8295 for (unsigned In = 0; In < NumIncoming; In++) { 8296 VPValue *EdgeMask = 8297 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8298 assert((EdgeMask || NumIncoming == 1) && 8299 "Multiple predecessors with one having a full mask"); 8300 OperandsWithMask.push_back(Operands[In]); 8301 if (EdgeMask) 8302 OperandsWithMask.push_back(EdgeMask); 8303 } 8304 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8305 } 8306 8307 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8308 ArrayRef<VPValue *> Operands, 8309 VFRange &Range) const { 8310 8311 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8312 [this, CI](ElementCount VF) { 8313 return CM.isScalarWithPredication(CI, VF); 8314 }, 8315 Range); 8316 8317 if (IsPredicated) 8318 return nullptr; 8319 8320 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8321 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8322 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8323 ID == Intrinsic::pseudoprobe || 8324 ID == Intrinsic::experimental_noalias_scope_decl)) 8325 return nullptr; 8326 8327 auto willWiden = [&](ElementCount VF) -> bool { 8328 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8329 // The following case may be scalarized depending on the VF. 8330 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8331 // version of the instruction. 8332 // Is it beneficial to perform intrinsic call compared to lib call? 8333 bool NeedToScalarize = false; 8334 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8335 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8336 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8337 return UseVectorIntrinsic || !NeedToScalarize; 8338 }; 8339 8340 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8341 return nullptr; 8342 8343 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8344 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8345 } 8346 8347 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8348 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8349 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8350 // Instruction should be widened, unless it is scalar after vectorization, 8351 // scalarization is profitable or it is predicated. 8352 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8353 return CM.isScalarAfterVectorization(I, VF) || 8354 CM.isProfitableToScalarize(I, VF) || 8355 CM.isScalarWithPredication(I, VF); 8356 }; 8357 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8358 Range); 8359 } 8360 8361 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8362 ArrayRef<VPValue *> Operands) const { 8363 auto IsVectorizableOpcode = [](unsigned Opcode) { 8364 switch (Opcode) { 8365 case Instruction::Add: 8366 case Instruction::And: 8367 case Instruction::AShr: 8368 case Instruction::BitCast: 8369 case Instruction::FAdd: 8370 case Instruction::FCmp: 8371 case Instruction::FDiv: 8372 case Instruction::FMul: 8373 case Instruction::FNeg: 8374 case Instruction::FPExt: 8375 case Instruction::FPToSI: 8376 case Instruction::FPToUI: 8377 case Instruction::FPTrunc: 8378 case Instruction::FRem: 8379 case Instruction::FSub: 8380 case Instruction::ICmp: 8381 case Instruction::IntToPtr: 8382 case Instruction::LShr: 8383 case Instruction::Mul: 8384 case Instruction::Or: 8385 case Instruction::PtrToInt: 8386 case Instruction::SDiv: 8387 case Instruction::Select: 8388 case Instruction::SExt: 8389 case Instruction::Shl: 8390 case Instruction::SIToFP: 8391 case Instruction::SRem: 8392 case Instruction::Sub: 8393 case Instruction::Trunc: 8394 case Instruction::UDiv: 8395 case Instruction::UIToFP: 8396 case Instruction::URem: 8397 case Instruction::Xor: 8398 case Instruction::ZExt: 8399 return true; 8400 } 8401 return false; 8402 }; 8403 8404 if (!IsVectorizableOpcode(I->getOpcode())) 8405 return nullptr; 8406 8407 // Success: widen this instruction. 
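  // Hedged sketch of what widening means here (assumed VF = 4, not a claim
  // about any particular target): a scalar loop-body statement like
  //   c[i] = a[i] + b[i];
  // is modelled by a single recipe that later emits one wide add per unroll
  // part, conceptually c[i..i+3] = a[i..i+3] + b[i..i+3].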
8408 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8409 } 8410 8411 void VPRecipeBuilder::fixHeaderPhis() { 8412 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8413 for (VPHeaderPHIRecipe *R : PhisToFix) { 8414 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8415 VPRecipeBase *IncR = 8416 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8417 R->addOperand(IncR->getVPSingleValue()); 8418 } 8419 } 8420 8421 VPBasicBlock *VPRecipeBuilder::handleReplication( 8422 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8423 VPlanPtr &Plan) { 8424 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8425 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8426 Range); 8427 8428 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8429 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8430 Range); 8431 8432 // Even if the instruction is not marked as uniform, there are certain 8433 // intrinsic calls that can be effectively treated as such, so we check for 8434 // them here. Conservatively, we only do this for scalable vectors, since 8435 // for fixed-width VFs we can always fall back on full scalarization. 8436 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8437 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8438 case Intrinsic::assume: 8439 case Intrinsic::lifetime_start: 8440 case Intrinsic::lifetime_end: 8441 // For scalable vectors if one of the operands is variant then we still 8442 // want to mark as uniform, which will generate one instruction for just 8443 // the first lane of the vector. We can't scalarize the call in the same 8444 // way as for fixed-width vectors because we don't know how many lanes 8445 // there are. 8446 // 8447 // The reasons for doing it this way for scalable vectors are: 8448 // 1. For the assume intrinsic generating the instruction for the first 8449 // lane is still be better than not generating any at all. For 8450 // example, the input may be a splat across all lanes. 8451 // 2. For the lifetime start/end intrinsics the pointer operand only 8452 // does anything useful when the input comes from a stack object, 8453 // which suggests it should always be uniform. For non-stack objects 8454 // the effect is to poison the object, which still allows us to 8455 // remove the call. 8456 IsUniform = true; 8457 break; 8458 default: 8459 break; 8460 } 8461 } 8462 8463 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8464 IsUniform, IsPredicated); 8465 setRecipe(I, Recipe); 8466 Plan->addVPValue(I, Recipe); 8467 8468 // Find if I uses a predicated instruction. If so, it will use its scalar 8469 // value. Avoid hoisting the insert-element which packs the scalar value into 8470 // a vector value, as that happens iff all users use the vector value. 8471 for (VPValue *Op : Recipe->operands()) { 8472 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8473 if (!PredR) 8474 continue; 8475 auto *RepR = 8476 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8477 assert(RepR->isPredicated() && 8478 "expected Replicate recipe to be predicated"); 8479 RepR->setAlsoPack(false); 8480 } 8481 8482 // Finalize the recipe for Instr, first if it is not predicated. 
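  // Illustrative case for the predicated path below (example loop assumed,
  // not taken from this file): a guarded, potentially trapping operation like
  //   if (p[i] != 0)
  //     q[i] = 100 / p[i];
  // cannot be widened speculatively; each lane is replicated and the divide
  // is wrapped in the "pred.<opcode>" if-then region built by
  // createReplicateRegion.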
8483 if (!IsPredicated) { 8484 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8485 VPBB->appendRecipe(Recipe); 8486 return VPBB; 8487 } 8488 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8489 8490 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8491 assert(SingleSucc && "VPBB must have a single successor when handling " 8492 "predicated replication."); 8493 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8494 // Record predicated instructions for above packing optimizations. 8495 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8496 VPBlockUtils::insertBlockAfter(Region, VPBB); 8497 auto *RegSucc = new VPBasicBlock(); 8498 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8499 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8500 return RegSucc; 8501 } 8502 8503 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8504 VPRecipeBase *PredRecipe, 8505 VPlanPtr &Plan) { 8506 // Instructions marked for predication are replicated and placed under an 8507 // if-then construct to prevent side-effects. 8508 8509 // Generate recipes to compute the block mask for this region. 8510 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8511 8512 // Build the triangular if-then region. 8513 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8514 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8515 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8516 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8517 auto *PHIRecipe = Instr->getType()->isVoidTy() 8518 ? nullptr 8519 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8520 if (PHIRecipe) { 8521 Plan->removeVPValueFor(Instr); 8522 Plan->addVPValue(Instr, PHIRecipe); 8523 } 8524 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8525 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8526 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8527 8528 // Note: first set Entry as region entry and then connect successors starting 8529 // from it in order, to propagate the "parent" of each VPBasicBlock. 8530 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8531 VPBlockUtils::connectBlocks(Pred, Exit); 8532 8533 return Region; 8534 } 8535 8536 VPRecipeOrVPValueTy 8537 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8538 ArrayRef<VPValue *> Operands, 8539 VFRange &Range, VPlanPtr &Plan) { 8540 // First, check for specific widening recipes that deal with calls, memory 8541 // operations, inductions and Phi nodes. 
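  // Summary of the dispatch below: CallInst -> tryToWidenCall; Load/Store ->
  // tryToWidenMemory; header phis -> induction/reduction/first-order
  // recurrence recipes; non-header phis -> tryToBlend; TruncInst of an
  // induction -> tryToOptimizeInductionTruncate; everything else goes through
  // shouldWiden and then the GEP/Select/generic widening recipes.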
8542 if (auto *CI = dyn_cast<CallInst>(Instr)) 8543 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8544 8545 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8546 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8547 8548 VPRecipeBase *Recipe; 8549 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8550 if (Phi->getParent() != OrigLoop->getHeader()) 8551 return tryToBlend(Phi, Operands, Plan); 8552 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8553 return toVPRecipeResult(Recipe); 8554 8555 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8556 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8557 VPValue *StartV = Operands[0]; 8558 if (Legal->isReductionVariable(Phi)) { 8559 const RecurrenceDescriptor &RdxDesc = 8560 Legal->getReductionVars().find(Phi)->second; 8561 assert(RdxDesc.getRecurrenceStartValue() == 8562 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8563 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8564 CM.isInLoopReduction(Phi), 8565 CM.useOrderedReductions(RdxDesc)); 8566 } else { 8567 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8568 } 8569 8570 // Record the incoming value from the backedge, so we can add the incoming 8571 // value from the backedge after all recipes have been created. 8572 recordRecipeOf(cast<Instruction>( 8573 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8574 PhisToFix.push_back(PhiRecipe); 8575 } else { 8576 // TODO: record backedge value for remaining pointer induction phis. 8577 assert(Phi->getType()->isPointerTy() && 8578 "only pointer phis should be handled here"); 8579 assert(Legal->getInductionVars().count(Phi) && 8580 "Not an induction variable"); 8581 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8582 VPValue *Start = Plan->getOrAddVPValue(II.getStartValue()); 8583 PhiRecipe = new VPWidenPHIRecipe(Phi, Start); 8584 } 8585 8586 return toVPRecipeResult(PhiRecipe); 8587 } 8588 8589 if (isa<TruncInst>(Instr) && 8590 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8591 Range, *Plan))) 8592 return toVPRecipeResult(Recipe); 8593 8594 if (!shouldWiden(Instr, Range)) 8595 return nullptr; 8596 8597 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8598 return toVPRecipeResult(new VPWidenGEPRecipe( 8599 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8600 8601 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8602 bool InvariantCond = 8603 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8604 return toVPRecipeResult(new VPWidenSelectRecipe( 8605 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8606 } 8607 8608 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8609 } 8610 8611 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8612 ElementCount MaxVF) { 8613 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8614 8615 // Collect instructions from the original loop that will become trivially dead 8616 // in the vectorized loop. We don't need to vectorize these instructions. For 8617 // example, original induction update instructions can become dead because we 8618 // separately emit induction "steps" when generating code for the new loop. 8619 // Similarly, we create a new latch condition when setting up the structure 8620 // of the new loop, so the old one can become dead. 
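  // For example, in a loop 'for (i = 0; i < n; ++i)' the scalar 'i++' update
  // and the 'i < n' latch compare typically land in this set: the vector loop
  // gets its own canonical IV increment and BranchOnCount instead.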
8621 SmallPtrSet<Instruction *, 4> DeadInstructions; 8622 collectTriviallyDeadInstructions(DeadInstructions); 8623 8624 // Add assume instructions we need to drop to DeadInstructions, to prevent 8625 // them from being added to the VPlan. 8626 // TODO: We only need to drop assumes in blocks that get flattened. If the 8627 // control flow is preserved, we should keep them. 8628 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 8629 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 8630 8631 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8632 // Dead instructions do not need sinking. Remove them from SinkAfter. 8633 for (Instruction *I : DeadInstructions) 8634 SinkAfter.erase(I); 8635 8636 // Cannot sink instructions after dead instructions (there won't be any 8637 // recipes for them). Instead, find the first non-dead previous instruction. 8638 for (auto &P : Legal->getSinkAfter()) { 8639 Instruction *SinkTarget = P.second; 8640 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8641 (void)FirstInst; 8642 while (DeadInstructions.contains(SinkTarget)) { 8643 assert( 8644 SinkTarget != FirstInst && 8645 "Must find a live instruction (at least the one feeding the " 8646 "first-order recurrence PHI) before reaching beginning of the block"); 8647 SinkTarget = SinkTarget->getPrevNode(); 8648 assert(SinkTarget != P.first && 8649 "sink source equals target, no sinking required"); 8650 } 8651 P.second = SinkTarget; 8652 } 8653 8654 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8655 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8656 VFRange SubRange = {VF, MaxVFPlusOne}; 8657 VPlans.push_back( 8658 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8659 VF = SubRange.End; 8660 } 8661 } 8662 8663 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a 8664 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a 8665 // BranchOnCount VPInstruction to the latch. 8666 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8667 bool HasNUW, bool IsVPlanNative) { 8668 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8669 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8670 8671 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8672 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8673 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8674 if (IsVPlanNative) 8675 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 8676 Header->insert(CanonicalIVPHI, Header->begin()); 8677 8678 auto *CanonicalIVIncrement = 8679 new VPInstruction(HasNUW ?
VPInstruction::CanonicalIVIncrementNUW 8680 : VPInstruction::CanonicalIVIncrement, 8681 {CanonicalIVPHI}, DL); 8682 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8683 8684 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8685 if (IsVPlanNative) { 8686 EB = cast<VPBasicBlock>(EB->getSinglePredecessor()); 8687 EB->setCondBit(nullptr); 8688 } 8689 EB->appendRecipe(CanonicalIVIncrement); 8690 8691 auto *BranchOnCount = 8692 new VPInstruction(VPInstruction::BranchOnCount, 8693 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8694 EB->appendRecipe(BranchOnCount); 8695 } 8696 8697 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8698 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8699 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8700 8701 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8702 8703 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8704 8705 // --------------------------------------------------------------------------- 8706 // Pre-construction: record ingredients whose recipes we'll need to further 8707 // process after constructing the initial VPlan. 8708 // --------------------------------------------------------------------------- 8709 8710 // Mark instructions we'll need to sink later and their targets as 8711 // ingredients whose recipe we'll need to record. 8712 for (auto &Entry : SinkAfter) { 8713 RecipeBuilder.recordRecipeOf(Entry.first); 8714 RecipeBuilder.recordRecipeOf(Entry.second); 8715 } 8716 for (auto &Reduction : CM.getInLoopReductionChains()) { 8717 PHINode *Phi = Reduction.first; 8718 RecurKind Kind = 8719 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8720 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8721 8722 RecipeBuilder.recordRecipeOf(Phi); 8723 for (auto &R : ReductionOperations) { 8724 RecipeBuilder.recordRecipeOf(R); 8725 // For min/max reductions, where we have a pair of icmp/select, we also 8726 // need to record the ICmp recipe, so it can be removed later. 8727 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8728 "Only min/max recurrences allowed for inloop reductions"); 8729 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8730 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8731 } 8732 } 8733 8734 // For each interleave group which is relevant for this (possibly trimmed) 8735 // Range, add it to the set of groups to be later applied to the VPlan and add 8736 // placeholders for its members' Recipes which we'll be replacing with a 8737 // single VPInterleaveRecipe. 8738 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8739 auto applyIG = [IG, this](ElementCount VF) -> bool { 8740 return (VF.isVector() && // Query is illegal for VF == 1 8741 CM.getWideningDecision(IG->getInsertPos(), VF) == 8742 LoopVectorizationCostModel::CM_Interleave); 8743 }; 8744 if (!getDecisionAndClampRange(applyIG, Range)) 8745 continue; 8746 InterleaveGroups.insert(IG); 8747 for (unsigned i = 0; i < IG->getFactor(); i++) 8748 if (Instruction *Member = IG->getMember(i)) 8749 RecipeBuilder.recordRecipeOf(Member); 8750 }; 8751 8752 // --------------------------------------------------------------------------- 8753 // Build initial VPlan: Scan the body of the loop in a topological order to 8754 // visit each basic block after having visited its predecessor basic blocks. 
8755 // --------------------------------------------------------------------------- 8756 8757 // Create initial VPlan skeleton, with separate header and latch blocks. 8758 VPBasicBlock *HeaderVPBB = new VPBasicBlock(); 8759 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch"); 8760 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB); 8761 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop"); 8762 auto Plan = std::make_unique<VPlan>(TopRegion); 8763 8764 Instruction *DLInst = 8765 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 8766 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), 8767 DLInst ? DLInst->getDebugLoc() : DebugLoc(), 8768 !CM.foldTailByMasking(), false); 8769 8770 // Scan the body of the loop in a topological order to visit each basic block 8771 // after having visited its predecessor basic blocks. 8772 LoopBlocksDFS DFS(OrigLoop); 8773 DFS.perform(LI); 8774 8775 VPBasicBlock *VPBB = HeaderVPBB; 8776 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 8777 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8778 // Relevant instructions from basic block BB will be grouped into VPRecipe 8779 // ingredients and fill a new VPBasicBlock. 8780 unsigned VPBBsForBB = 0; 8781 VPBB->setName(BB->getName()); 8782 Builder.setInsertPoint(VPBB); 8783 8784 // Introduce each ingredient into VPlan. 8785 // TODO: Model and preserve debug instrinsics in VPlan. 8786 for (Instruction &I : BB->instructionsWithoutDebug()) { 8787 Instruction *Instr = &I; 8788 8789 // First filter out irrelevant instructions, to ensure no recipes are 8790 // built for them. 8791 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8792 continue; 8793 8794 SmallVector<VPValue *, 4> Operands; 8795 auto *Phi = dyn_cast<PHINode>(Instr); 8796 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8797 Operands.push_back(Plan->getOrAddVPValue( 8798 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8799 } else { 8800 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8801 Operands = {OpRange.begin(), OpRange.end()}; 8802 } 8803 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8804 Instr, Operands, Range, Plan)) { 8805 // If Instr can be simplified to an existing VPValue, use it. 8806 if (RecipeOrValue.is<VPValue *>()) { 8807 auto *VPV = RecipeOrValue.get<VPValue *>(); 8808 Plan->addVPValue(Instr, VPV); 8809 // If the re-used value is a recipe, register the recipe for the 8810 // instruction, in case the recipe for Instr needs to be recorded. 8811 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8812 RecipeBuilder.setRecipe(Instr, R); 8813 continue; 8814 } 8815 // Otherwise, add the new recipe. 8816 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8817 for (auto *Def : Recipe->definedValues()) { 8818 auto *UV = Def->getUnderlyingValue(); 8819 Plan->addVPValue(UV, Def); 8820 } 8821 8822 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8823 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8824 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8825 // of the header block. That can happen for truncates of induction 8826 // variables. Those recipes are moved to the phi section of the header 8827 // block after applying SinkAfter, which relies on the original 8828 // position of the trunc. 
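          // Illustrative case (assumed source, not from this file): with a
          // 64-bit IV used as 'a[(int32_t)i]', the trunc is folded into a
          // VPWidenIntOrFpInductionRecipe created at the trunc's original
          // position, which is why it must later be moved into the header phi
          // section once sink-after has run.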
8829 assert(isa<TruncInst>(Instr)); 8830 InductionsToMove.push_back( 8831 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8832 } 8833 RecipeBuilder.setRecipe(Instr, Recipe); 8834 VPBB->appendRecipe(Recipe); 8835 continue; 8836 } 8837 8838 // Otherwise, if all widening options failed, Instruction is to be 8839 // replicated. This may create a successor for VPBB. 8840 VPBasicBlock *NextVPBB = 8841 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8842 if (NextVPBB != VPBB) { 8843 VPBB = NextVPBB; 8844 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8845 : ""); 8846 } 8847 } 8848 8849 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8850 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8851 } 8852 8853 HeaderVPBB->setName("vector.body"); 8854 8855 // Fold the last, empty block into its predecessor. 8856 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8857 assert(VPBB && "expected to fold last (empty) block"); 8858 // After here, VPBB should not be used. 8859 VPBB = nullptr; 8860 8861 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8862 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8863 "entry block must be set to a VPRegionBlock having a non-empty entry " 8864 "VPBasicBlock"); 8865 RecipeBuilder.fixHeaderPhis(); 8866 8867 // --------------------------------------------------------------------------- 8868 // Transform initial VPlan: Apply previously taken decisions, in order, to 8869 // bring the VPlan to its final state. 8870 // --------------------------------------------------------------------------- 8871 8872 // Apply Sink-After legal constraints. 8873 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8874 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8875 if (Region && Region->isReplicator()) { 8876 assert(Region->getNumSuccessors() == 1 && 8877 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8878 assert(R->getParent()->size() == 1 && 8879 "A recipe in an original replicator region must be the only " 8880 "recipe in its block"); 8881 return Region; 8882 } 8883 return nullptr; 8884 }; 8885 for (auto &Entry : SinkAfter) { 8886 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8887 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8888 8889 auto *TargetRegion = GetReplicateRegion(Target); 8890 auto *SinkRegion = GetReplicateRegion(Sink); 8891 if (!SinkRegion) { 8892 // If the sink source is not a replicate region, sink the recipe directly. 8893 if (TargetRegion) { 8894 // The target is in a replication region, make sure to move Sink to 8895 // the block after it, not into the replication region itself. 8896 VPBasicBlock *NextBlock = 8897 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8898 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8899 } else 8900 Sink->moveAfter(Target); 8901 continue; 8902 } 8903 8904 // The sink source is in a replicate region. Unhook the region from the CFG. 8905 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8906 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8907 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8908 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8909 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8910 8911 if (TargetRegion) { 8912 // The target recipe is also in a replicate region, move the sink region 8913 // after the target region. 
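      // Sketch of the rewiring performed here: before, the CFG contains
      //   ... -> TargetRegion -> TargetSucc   (SinkRegion already unhooked
      // above); afterwards it contains
      //   ... -> TargetRegion -> SinkRegion -> TargetSucc.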
8914 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8915 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8916 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8917 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8918 } else { 8919 // The sink source is in a replicate region, we need to move the whole 8920 // replicate region, which should only contain a single recipe in the 8921 // main block. 8922 auto *SplitBlock = 8923 Target->getParent()->splitAt(std::next(Target->getIterator())); 8924 8925 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8926 8927 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8928 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8929 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8930 } 8931 } 8932 8933 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8934 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8935 8936 // Now that sink-after is done, move induction recipes for optimized truncates 8937 // to the phi section of the header block. 8938 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8939 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8940 8941 // Adjust the recipes for any inloop reductions. 8942 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 8943 RecipeBuilder, Range.Start); 8944 8945 // Introduce a recipe to combine the incoming and previous values of a 8946 // first-order recurrence. 8947 for (VPRecipeBase &R : 8948 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8949 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8950 if (!RecurPhi) 8951 continue; 8952 8953 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8954 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8955 auto *Region = GetReplicateRegion(PrevRecipe); 8956 if (Region) 8957 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 8958 if (Region || PrevRecipe->isPhi()) 8959 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8960 else 8961 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8962 8963 auto *RecurSplice = cast<VPInstruction>( 8964 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8965 {RecurPhi, RecurPhi->getBackedgeValue()})); 8966 8967 RecurPhi->replaceAllUsesWith(RecurSplice); 8968 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8969 // all users. 8970 RecurSplice->setOperand(0, RecurPhi); 8971 } 8972 8973 // Interleave memory: for each Interleave Group we marked earlier as relevant 8974 // for this VPlan, replace the Recipes widening its memory instructions with a 8975 // single VPInterleaveRecipe at its insertion point. 
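  // Hedged example (assumed interleave factor 2): for a loop body such as
  //   sum += a[2 * i] + a[2 * i + 1];
  // both strided loads belong to one interleave group; the single wide
  // recipe loads VF*2 contiguous elements once and de-interleaves them,
  // instead of emitting two separate strided/gather accesses.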
8976 for (auto IG : InterleaveGroups) { 8977 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8978 RecipeBuilder.getRecipe(IG->getInsertPos())); 8979 SmallVector<VPValue *, 4> StoredValues; 8980 for (unsigned i = 0; i < IG->getFactor(); ++i) 8981 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 8982 auto *StoreR = 8983 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 8984 StoredValues.push_back(StoreR->getStoredValue()); 8985 } 8986 8987 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8988 Recipe->getMask()); 8989 VPIG->insertBefore(Recipe); 8990 unsigned J = 0; 8991 for (unsigned i = 0; i < IG->getFactor(); ++i) 8992 if (Instruction *Member = IG->getMember(i)) { 8993 if (!Member->getType()->isVoidTy()) { 8994 VPValue *OriginalV = Plan->getVPValue(Member); 8995 Plan->removeVPValueFor(Member); 8996 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8997 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8998 J++; 8999 } 9000 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9001 } 9002 } 9003 9004 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9005 // in ways that accessing values using original IR values is incorrect. 9006 Plan->disableValue2VPValue(); 9007 9008 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9009 VPlanTransforms::sinkScalarOperands(*Plan); 9010 VPlanTransforms::mergeReplicateRegions(*Plan); 9011 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9012 9013 std::string PlanName; 9014 raw_string_ostream RSO(PlanName); 9015 ElementCount VF = Range.Start; 9016 Plan->addVF(VF); 9017 RSO << "Initial VPlan for VF={" << VF; 9018 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9019 Plan->addVF(VF); 9020 RSO << "," << VF; 9021 } 9022 RSO << "},UF>=1"; 9023 RSO.flush(); 9024 Plan->setName(PlanName); 9025 9026 // Fold Exit block into its predecessor if possible. 9027 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9028 // VPBasicBlock as exit. 9029 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit()); 9030 9031 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9032 return Plan; 9033 } 9034 9035 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9036 // Outer loop handling: They may require CFG and instruction level 9037 // transformations before even evaluating whether vectorization is profitable. 9038 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9039 // the vectorization pipeline. 9040 assert(!OrigLoop->isInnermost()); 9041 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9042 9043 // Create new empty VPlan 9044 auto Plan = std::make_unique<VPlan>(); 9045 9046 // Build hierarchical CFG 9047 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9048 HCFGBuilder.buildHierarchicalCFG(); 9049 9050 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9051 VF *= 2) 9052 Plan->addVF(VF); 9053 9054 if (EnableVPlanPredication) { 9055 VPlanPredicator VPP(*Plan); 9056 VPP.predicate(); 9057 9058 // Avoid running transformation to recipes until masked code generation in 9059 // VPlan-native path is in place. 
9060 return Plan; 9061 } 9062 9063 SmallPtrSet<Instruction *, 1> DeadInstructions; 9064 VPlanTransforms::VPInstructionsToVPRecipes( 9065 OrigLoop, Plan, 9066 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, 9067 DeadInstructions, *PSE.getSE()); 9068 9069 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(), 9070 true, true); 9071 return Plan; 9072 } 9073 9074 // Adjust the recipes for reductions. For in-loop reductions the chain of 9075 // instructions leading from the loop exit instr to the phi needs to be converted 9076 // to reductions, with one operand being vector and the other being the scalar 9077 // reduction chain. For other reductions, a select is introduced between the phi 9078 // and live-out recipes when folding the tail. 9079 void LoopVectorizationPlanner::adjustRecipesForReductions( 9080 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, 9081 ElementCount MinVF) { 9082 for (auto &Reduction : CM.getInLoopReductionChains()) { 9083 PHINode *Phi = Reduction.first; 9084 const RecurrenceDescriptor &RdxDesc = 9085 Legal->getReductionVars().find(Phi)->second; 9086 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9087 9088 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) 9089 continue; 9090 9091 // ReductionOperations are ordered top-down from the phi's use to the 9092 // LoopExitValue. We keep track of the previous item (the Chain) to tell 9093 // which of the two operands will remain scalar and which will be reduced. 9094 // For minmax the chain will be the select instructions. 9095 Instruction *Chain = Phi; 9096 for (Instruction *R : ReductionOperations) { 9097 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9098 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9099 9100 VPValue *ChainOp = Plan->getVPValue(Chain); 9101 unsigned FirstOpId; 9102 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9103 "Only min/max recurrences allowed for inloop reductions"); 9104 // Recognize a call to the llvm.fmuladd intrinsic. 9105 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9106 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9107 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9108 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9109 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9110 "Expected to replace a VPWidenSelectSC"); 9111 FirstOpId = 1; 9112 } else { 9113 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9114 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9115 "Expected to replace a VPWidenSC"); 9116 FirstOpId = 0; 9117 } 9118 unsigned VecOpId = 9119 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9120 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9121 9122 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent()) 9123 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9124 : nullptr; 9125 9126 if (IsFMulAdd) { 9127 // If the instruction is a call to the llvm.fmuladd intrinsic, then we 9128 // need to create an fmul recipe to use as the vector operand for the 9129 // fadd reduction.
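        // Illustrative decomposition (types and names assumed): a reduction
        // updated via 'x = llvm.fmuladd(a[i], b[i], x)' is rebuilt as an FMul
        // VPInstruction on the two multiplicands, which then becomes the
        // vector operand of the VPReductionRecipe performing the fadd part.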
9130 VPInstruction *FMulRecipe = new VPInstruction( 9131 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9132 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9133 WidenRecipe->getParent()->insert(FMulRecipe, 9134 WidenRecipe->getIterator()); 9135 VecOp = FMulRecipe; 9136 } 9137 VPReductionRecipe *RedRecipe = 9138 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9139 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9140 Plan->removeVPValueFor(R); 9141 Plan->addVPValue(R, RedRecipe); 9142 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9143 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9144 WidenRecipe->eraseFromParent(); 9145 9146 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9147 VPRecipeBase *CompareRecipe = 9148 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9149 assert(isa<VPWidenRecipe>(CompareRecipe) && 9150 "Expected to replace a VPWidenSC"); 9151 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9152 "Expected no remaining users"); 9153 CompareRecipe->eraseFromParent(); 9154 } 9155 Chain = R; 9156 } 9157 } 9158 9159 // If tail is folded by masking, introduce selects between the phi 9160 // and the live-out instruction of each reduction, at the beginning of the 9161 // dedicated latch block. 9162 if (CM.foldTailByMasking()) { 9163 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9164 for (VPRecipeBase &R : 9165 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9166 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9167 if (!PhiR || PhiR->isInLoop()) 9168 continue; 9169 VPValue *Cond = 9170 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9171 VPValue *Red = PhiR->getBackedgeValue(); 9172 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9173 "reduction recipe must be defined before latch"); 9174 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9175 } 9176 } 9177 } 9178 9179 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9180 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9181 VPSlotTracker &SlotTracker) const { 9182 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9183 IG->getInsertPos()->printAsOperand(O, false); 9184 O << ", "; 9185 getAddr()->printAsOperand(O, SlotTracker); 9186 VPValue *Mask = getMask(); 9187 if (Mask) { 9188 O << ", "; 9189 Mask->printAsOperand(O, SlotTracker); 9190 } 9191 9192 unsigned OpIdx = 0; 9193 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9194 if (!IG->getMember(i)) 9195 continue; 9196 if (getNumStoreOperands() > 0) { 9197 O << "\n" << Indent << " store "; 9198 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9199 O << " to index " << i; 9200 } else { 9201 O << "\n" << Indent << " "; 9202 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9203 O << " = load from index " << i; 9204 } 9205 ++OpIdx; 9206 } 9207 } 9208 #endif 9209 9210 void VPWidenCallRecipe::execute(VPTransformState &State) { 9211 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9212 *this, State); 9213 } 9214 9215 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9216 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9217 State.ILV->setDebugLocFromInst(&I); 9218 9219 // The condition can be loop invariant but still defined inside the 9220 // loop. This means that we can't just use the original 'cond' value. 9221 // We have to take the 'vectorized' value and pick the first lane. 
9222 // Instcombine will make this a no-op. 9223 auto *InvarCond = 9224 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9225 9226 for (unsigned Part = 0; Part < State.UF; ++Part) { 9227 Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part); 9228 Value *Op0 = State.get(getOperand(1), Part); 9229 Value *Op1 = State.get(getOperand(2), Part); 9230 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9231 State.set(this, Sel, Part); 9232 State.ILV->addMetadata(Sel, &I); 9233 } 9234 } 9235 9236 void VPWidenRecipe::execute(VPTransformState &State) { 9237 auto &I = *cast<Instruction>(getUnderlyingValue()); 9238 auto &Builder = State.Builder; 9239 switch (I.getOpcode()) { 9240 case Instruction::Call: 9241 case Instruction::Br: 9242 case Instruction::PHI: 9243 case Instruction::GetElementPtr: 9244 case Instruction::Select: 9245 llvm_unreachable("This instruction is handled by a different recipe."); 9246 case Instruction::UDiv: 9247 case Instruction::SDiv: 9248 case Instruction::SRem: 9249 case Instruction::URem: 9250 case Instruction::Add: 9251 case Instruction::FAdd: 9252 case Instruction::Sub: 9253 case Instruction::FSub: 9254 case Instruction::FNeg: 9255 case Instruction::Mul: 9256 case Instruction::FMul: 9257 case Instruction::FDiv: 9258 case Instruction::FRem: 9259 case Instruction::Shl: 9260 case Instruction::LShr: 9261 case Instruction::AShr: 9262 case Instruction::And: 9263 case Instruction::Or: 9264 case Instruction::Xor: { 9265 // Just widen unops and binops. 9266 State.ILV->setDebugLocFromInst(&I); 9267 9268 for (unsigned Part = 0; Part < State.UF; ++Part) { 9269 SmallVector<Value *, 2> Ops; 9270 for (VPValue *VPOp : operands()) 9271 Ops.push_back(State.get(VPOp, Part)); 9272 9273 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9274 9275 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9276 VecOp->copyIRFlags(&I); 9277 9278 // If the instruction is vectorized and was in a basic block that needed 9279 // predication, we can't propagate poison-generating flags (nuw/nsw, 9280 // exact, etc.). The control flow has been linearized and the 9281 // instruction is no longer guarded by the predicate, which could make 9282 // the flag properties to no longer hold. 9283 if (State.MayGeneratePoisonRecipes.contains(this)) 9284 VecOp->dropPoisonGeneratingFlags(); 9285 } 9286 9287 // Use this vector value for all users of the original instruction. 9288 State.set(this, V, Part); 9289 State.ILV->addMetadata(V, &I); 9290 } 9291 9292 break; 9293 } 9294 case Instruction::ICmp: 9295 case Instruction::FCmp: { 9296 // Widen compares. Generate vector compares. 9297 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9298 auto *Cmp = cast<CmpInst>(&I); 9299 State.ILV->setDebugLocFromInst(Cmp); 9300 for (unsigned Part = 0; Part < State.UF; ++Part) { 9301 Value *A = State.get(getOperand(0), Part); 9302 Value *B = State.get(getOperand(1), Part); 9303 Value *C = nullptr; 9304 if (FCmp) { 9305 // Propagate fast math flags. 
9306 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9307 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9308 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9309 } else { 9310 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9311 } 9312 State.set(this, C, Part); 9313 State.ILV->addMetadata(C, &I); 9314 } 9315 9316 break; 9317 } 9318 9319 case Instruction::ZExt: 9320 case Instruction::SExt: 9321 case Instruction::FPToUI: 9322 case Instruction::FPToSI: 9323 case Instruction::FPExt: 9324 case Instruction::PtrToInt: 9325 case Instruction::IntToPtr: 9326 case Instruction::SIToFP: 9327 case Instruction::UIToFP: 9328 case Instruction::Trunc: 9329 case Instruction::FPTrunc: 9330 case Instruction::BitCast: { 9331 auto *CI = cast<CastInst>(&I); 9332 State.ILV->setDebugLocFromInst(CI); 9333 9334 /// Vectorize casts. 9335 Type *DestTy = (State.VF.isScalar()) 9336 ? CI->getType() 9337 : VectorType::get(CI->getType(), State.VF); 9338 9339 for (unsigned Part = 0; Part < State.UF; ++Part) { 9340 Value *A = State.get(getOperand(0), Part); 9341 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9342 State.set(this, Cast, Part); 9343 State.ILV->addMetadata(Cast, &I); 9344 } 9345 break; 9346 } 9347 default: 9348 // This instruction is not vectorized by simple widening. 9349 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9350 llvm_unreachable("Unhandled instruction!"); 9351 } // end of switch. 9352 } 9353 9354 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9355 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9356 // Construct a vector GEP by widening the operands of the scalar GEP as 9357 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9358 // results in a vector of pointers when at least one operand of the GEP 9359 // is vector-typed. Thus, to keep the representation compact, we only use 9360 // vector-typed operands for loop-varying values. 9361 9362 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9363 // If we are vectorizing, but the GEP has only loop-invariant operands, 9364 // the GEP we build (by only using vector-typed operands for 9365 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9366 // produce a vector of pointers, we need to either arbitrarily pick an 9367 // operand to broadcast, or broadcast a clone of the original GEP. 9368 // Here, we broadcast a clone of the original. 9369 // 9370 // TODO: If at some point we decide to scalarize instructions having 9371 // loop-invariant operands, this special case will no longer be 9372 // required. We would add the scalarization decision to 9373 // collectLoopScalars() and teach getVectorValue() to broadcast 9374 // the lane-zero scalar value. 9375 auto *Clone = State.Builder.Insert(GEP->clone()); 9376 for (unsigned Part = 0; Part < State.UF; ++Part) { 9377 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9378 State.set(this, EntryPart, Part); 9379 State.ILV->addMetadata(EntryPart, GEP); 9380 } 9381 } else { 9382 // If the GEP has at least one loop-varying operand, we are sure to 9383 // produce a vector of pointers. But if we are only unrolling, we want 9384 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9385 // produce with the code below will be scalar (if VF == 1) or vector 9386 // (otherwise). Note that for the unroll-only case, we still maintain 9387 // values in the vector mapping with initVector, as we do for other 9388 // instructions. 
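    // Illustrative case (assumed source): for an access like 'a[inv][j]'
    // where 'inv' is loop-invariant, only the varying index 'j' is taken
    // per-part below; the base pointer and the invariant index are read from
    // lane 0, keeping the GEP representation compact.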
9389 for (unsigned Part = 0; Part < State.UF; ++Part) { 9390 // The pointer operand of the new GEP. If it's loop-invariant, we 9391 // won't broadcast it. 9392 auto *Ptr = IsPtrLoopInvariant 9393 ? State.get(getOperand(0), VPIteration(0, 0)) 9394 : State.get(getOperand(0), Part); 9395 9396 // Collect all the indices for the new GEP. If any index is 9397 // loop-invariant, we won't broadcast it. 9398 SmallVector<Value *, 4> Indices; 9399 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9400 VPValue *Operand = getOperand(I); 9401 if (IsIndexLoopInvariant[I - 1]) 9402 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9403 else 9404 Indices.push_back(State.get(Operand, Part)); 9405 } 9406 9407 // If the GEP instruction is vectorized and was in a basic block that 9408 // needed predication, we can't propagate the poison-generating 'inbounds' 9409 // flag. The control flow has been linearized and the GEP is no longer 9410 // guarded by the predicate, which could make the 'inbounds' properties to 9411 // no longer hold. 9412 bool IsInBounds = 9413 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9414 9415 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9416 // but it should be a vector, otherwise. 9417 auto *NewGEP = IsInBounds 9418 ? State.Builder.CreateInBoundsGEP( 9419 GEP->getSourceElementType(), Ptr, Indices) 9420 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9421 Ptr, Indices); 9422 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9423 "NewGEP is not a pointer vector"); 9424 State.set(this, NewGEP, Part); 9425 State.ILV->addMetadata(NewGEP, GEP); 9426 } 9427 } 9428 } 9429 9430 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9431 assert(!State.Instance && "Int or FP induction being replicated."); 9432 9433 Value *Start = getStartValue()->getLiveInIRValue(); 9434 const InductionDescriptor &ID = getInductionDescriptor(); 9435 TruncInst *Trunc = getTruncInst(); 9436 IRBuilderBase &Builder = State.Builder; 9437 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9438 assert(State.VF.isVector() && "must have vector VF"); 9439 9440 // The value from the original loop to which we are mapping the new induction 9441 // variable. 9442 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9443 9444 auto &DL = EntryVal->getModule()->getDataLayout(); 9445 9446 // Generate code for the induction step. Note that induction steps are 9447 // required to be loop-invariant 9448 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 9449 if (SE.isSCEVable(IV->getType())) { 9450 SCEVExpander Exp(SE, DL, "induction"); 9451 return Exp.expandCodeFor(Step, Step->getType(), 9452 State.CFG.VectorPreHeader->getTerminator()); 9453 } 9454 return cast<SCEVUnknown>(Step)->getValue(); 9455 }; 9456 9457 // Fast-math-flags propagate from the original induction instruction. 9458 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9459 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9460 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9461 9462 // Now do the actual transformations, and start with creating the step value. 
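  // Shape of the vector IV built below (hedged example with integer start St,
  // step S, VF = 4, UF = 2):
  //   part 0: <St, St+S, St+2*S, St+3*S>
  //   part 1: part 0 + <4*S, 4*S, 4*S, 4*S>   // SplatVF = RuntimeVF * S
  // The final step.add becomes "vec.ind.next" and feeds the vec.ind phi on
  // the backedge.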
9463 Value *Step = CreateStepValue(ID.getStep()); 9464 9465 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9466 "Expected either an induction phi-node or a truncate of it!"); 9467 9468 // Construct the initial value of the vector IV in the vector loop preheader 9469 auto CurrIP = Builder.saveIP(); 9470 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 9471 if (isa<TruncInst>(EntryVal)) { 9472 assert(Start->getType()->isIntegerTy() && 9473 "Truncation requires an integer type"); 9474 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9475 Step = Builder.CreateTrunc(Step, TruncType); 9476 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9477 } 9478 9479 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9480 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9481 Value *SteppedStart = getStepVector( 9482 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9483 9484 // We create vector phi nodes for both integer and floating-point induction 9485 // variables. Here, we determine the kind of arithmetic we will perform. 9486 Instruction::BinaryOps AddOp; 9487 Instruction::BinaryOps MulOp; 9488 if (Step->getType()->isIntegerTy()) { 9489 AddOp = Instruction::Add; 9490 MulOp = Instruction::Mul; 9491 } else { 9492 AddOp = ID.getInductionOpcode(); 9493 MulOp = Instruction::FMul; 9494 } 9495 9496 // Multiply the vectorization factor by the step using integer or 9497 // floating-point arithmetic as appropriate. 9498 Type *StepType = Step->getType(); 9499 Value *RuntimeVF; 9500 if (Step->getType()->isFloatingPointTy()) 9501 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9502 else 9503 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9504 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9505 9506 // Create a vector splat to use in the induction update. 9507 // 9508 // FIXME: If the step is non-constant, we create the vector splat with 9509 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9510 // handle a constant vector splat. 9511 Value *SplatVF = isa<Constant>(Mul) 9512 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9513 : Builder.CreateVectorSplat(State.VF, Mul); 9514 Builder.restoreIP(CurrIP); 9515 9516 // We may need to add the step a number of times, depending on the unroll 9517 // factor. The last of those goes into the PHI. 9518 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9519 &*State.CFG.PrevBB->getFirstInsertionPt()); 9520 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9521 Instruction *LastInduction = VecInd; 9522 for (unsigned Part = 0; Part < State.UF; ++Part) { 9523 State.set(this, LastInduction, Part); 9524 9525 if (isa<TruncInst>(EntryVal)) 9526 State.ILV->addMetadata(LastInduction, EntryVal); 9527 9528 LastInduction = cast<Instruction>( 9529 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9530 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9531 } 9532 9533 LastInduction->setName("vec.ind.next"); 9534 VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader); 9535 // Add induction update using an incorrect block temporarily. The phi node 9536 // will be fixed after VPlan execution. Note that at this point the latch 9537 // block cannot be used, as it does not exist yet. 9538 // TODO: Model increment value in VPlan, by turning the recipe into a 9539 // multi-def and a subclass of VPHeaderPHIRecipe. 
9540 VecInd->addIncoming(LastInduction, State.CFG.VectorPreHeader); 9541 } 9542 9543 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9544 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9545 "Not a pointer induction according to InductionDescriptor!"); 9546 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9547 "Unexpected type."); 9548 9549 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9550 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9551 9552 if (all_of(users(), [this](const VPUser *U) { 9553 return cast<VPRecipeBase>(U)->usesScalars(this); 9554 })) { 9555 // This is the normalized GEP that starts counting at zero. 9556 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9557 CanonicalIV, IndDesc.getStep()->getType()); 9558 // Determine the number of scalars we need to generate for each unroll 9559 // iteration. If the instruction is uniform, we only need to generate the 9560 // first lane. Otherwise, we generate all VF values. 9561 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9562 assert((IsUniform || !State.VF.isScalable()) && 9563 "Cannot scalarize a scalable VF"); 9564 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9565 9566 for (unsigned Part = 0; Part < State.UF; ++Part) { 9567 Value *PartStart = 9568 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9569 9570 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9571 Value *Idx = State.Builder.CreateAdd( 9572 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9573 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9574 9575 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9576 State.CFG.PrevBB->getTerminator()); 9577 Value *SclrGep = emitTransformedIndex( 9578 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9579 SclrGep->setName("next.gep"); 9580 State.set(this, SclrGep, VPIteration(Part, Lane)); 9581 } 9582 } 9583 return; 9584 } 9585 9586 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9587 "Induction step not a SCEV constant!"); 9588 Type *PhiType = IndDesc.getStep()->getType(); 9589 9590 // Build a pointer phi 9591 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9592 Type *ScStValueType = ScalarStartValue->getType(); 9593 PHINode *NewPointerPhi = 9594 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9595 NewPointerPhi->addIncoming(ScalarStartValue, State.CFG.VectorPreHeader); 9596 9597 // A pointer induction, performed by using a gep 9598 const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout(); 9599 Instruction *InductionLoc = &*State.Builder.GetInsertPoint(); 9600 9601 const SCEV *ScalarStep = IndDesc.getStep(); 9602 SCEVExpander Exp(SE, DL, "induction"); 9603 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9604 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9605 Value *NumUnrolledElems = 9606 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9607 Value *InductionGEP = GetElementPtrInst::Create( 9608 IndDesc.getElementType(), NewPointerPhi, 9609 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9610 InductionLoc); 9611 // Add induction update using an incorrect block temporarily. The phi node 9612 // will be fixed after VPlan execution. Note that at this point the latch 9613 // block cannot be used, as it does not exist yet. 9614 // TODO: Model increment value in VPlan, by turning the recipe into a 9615 // multi-def and a subclass of VPHeaderPHIRecipe. 
9616 NewPointerPhi->addIncoming(InductionGEP, State.CFG.VectorPreHeader); 9617 9618 // Create UF many actual address geps that use the pointer 9619 // phi as base and a vectorized version of the step value 9620 // (<step*0, ..., step*N>) as offset. 9621 for (unsigned Part = 0; Part < State.UF; ++Part) { 9622 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9623 Value *StartOffsetScalar = 9624 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9625 Value *StartOffset = 9626 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9627 // Create a vector of consecutive numbers from zero to VF. 9628 StartOffset = State.Builder.CreateAdd( 9629 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9630 9631 Value *GEP = State.Builder.CreateGEP( 9632 IndDesc.getElementType(), NewPointerPhi, 9633 State.Builder.CreateMul( 9634 StartOffset, 9635 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9636 "vector.gep")); 9637 State.set(this, GEP, Part); 9638 } 9639 } 9640 9641 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9642 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9643 9644 // Fast-math-flags propagate from the original induction instruction. 9645 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9646 if (IndDesc.getInductionBinOp() && 9647 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9648 State.Builder.setFastMathFlags( 9649 IndDesc.getInductionBinOp()->getFastMathFlags()); 9650 9651 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9652 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9653 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9654 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9655 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9656 ScalarIV = 9657 Ty->isIntegerTy() 9658 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9659 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9660 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9661 getStartValue()->getLiveInIRValue(), Step, 9662 IndDesc); 9663 ScalarIV->setName("offset.idx"); 9664 } 9665 if (TruncToTy) { 9666 assert(Step->getType()->isIntegerTy() && 9667 "Truncation requires an integer step"); 9668 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9669 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9670 } 9671 return ScalarIV; 9672 }; 9673 9674 Value *ScalarIV = CreateScalarIV(Step); 9675 if (State.VF.isVector()) { 9676 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9677 return; 9678 } 9679 9680 for (unsigned Part = 0; Part < State.UF; ++Part) { 9681 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9682 Value *EntryPart; 9683 if (Step->getType()->isFloatingPointTy()) { 9684 Value *StartIdx = 9685 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9686 // Floating-point operations inherit FMF via the builder's flags. 
9687 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9688 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9689 ScalarIV, MulOp); 9690 } else { 9691 Value *StartIdx = 9692 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9693 EntryPart = State.Builder.CreateAdd( 9694 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9695 } 9696 State.set(this, EntryPart, Part); 9697 } 9698 } 9699 9700 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9701 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9702 State); 9703 } 9704 9705 void VPBlendRecipe::execute(VPTransformState &State) { 9706 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9707 // We know that all PHIs in non-header blocks are converted into 9708 // selects, so we don't have to worry about the insertion order and we 9709 // can just use the builder. 9710 // At this point we generate the predication tree. There may be 9711 // duplications since this is a simple recursive scan, but future 9712 // optimizations will clean it up. 9713 9714 unsigned NumIncoming = getNumIncomingValues(); 9715 9716 // Generate a sequence of selects of the form: 9717 // SELECT(Mask3, In3, 9718 // SELECT(Mask2, In2, 9719 // SELECT(Mask1, In1, 9720 // In0))) 9721 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9722 // are essentially undef are taken from In0. 9723 InnerLoopVectorizer::VectorParts Entry(State.UF); 9724 for (unsigned In = 0; In < NumIncoming; ++In) { 9725 for (unsigned Part = 0; Part < State.UF; ++Part) { 9726 // We might have single edge PHIs (blocks) - use an identity 9727 // 'select' for the first PHI operand. 9728 Value *In0 = State.get(getIncomingValue(In), Part); 9729 if (In == 0) 9730 Entry[Part] = In0; // Initialize with the first incoming value. 9731 else { 9732 // Select between the current value and the previous incoming edge 9733 // based on the incoming mask. 9734 Value *Cond = State.get(getMask(In), Part); 9735 Entry[Part] = 9736 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9737 } 9738 } 9739 } 9740 for (unsigned Part = 0; Part < State.UF; ++Part) 9741 State.set(this, Entry[Part], Part); 9742 } 9743 9744 void VPInterleaveRecipe::execute(VPTransformState &State) { 9745 assert(!State.Instance && "Interleave group being replicated."); 9746 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9747 getStoredValues(), getMask()); 9748 } 9749 9750 void VPReductionRecipe::execute(VPTransformState &State) { 9751 assert(!State.Instance && "Reduction being replicated."); 9752 Value *PrevInChain = State.get(getChainOp(), 0); 9753 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9754 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9755 // Propagate the fast-math flags carried by the underlying instruction. 
9756 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9757 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9758 for (unsigned Part = 0; Part < State.UF; ++Part) {
9759 Value *NewVecOp = State.get(getVecOp(), Part);
9760 if (VPValue *Cond = getCondOp()) {
9761 Value *NewCond = State.get(Cond, Part);
9762 VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9763 Value *Iden = RdxDesc->getRecurrenceIdentity(
9764 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9765 Value *IdenVec =
9766 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9767 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9768 NewVecOp = Select;
9769 }
9770 Value *NewRed;
9771 Value *NextInChain;
9772 if (IsOrdered) {
9773 if (State.VF.isVector())
9774 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9775 PrevInChain);
9776 else
9777 NewRed = State.Builder.CreateBinOp(
9778 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9779 NewVecOp);
9780 PrevInChain = NewRed;
9781 } else {
9782 PrevInChain = State.get(getChainOp(), Part);
9783 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9784 }
9785 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9786 NextInChain =
9787 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9788 NewRed, PrevInChain);
9789 } else if (IsOrdered)
9790 NextInChain = NewRed;
9791 else
9792 NextInChain = State.Builder.CreateBinOp(
9793 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9794 PrevInChain);
9795 State.set(this, NextInChain, Part);
9796 }
9797 }
9798
9799 void VPReplicateRecipe::execute(VPTransformState &State) {
9800 if (State.Instance) { // Generate a single instance.
9801 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9802 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9803 IsPredicated, State);
9804 // Insert the scalar instance, packing it into a vector.
9805 if (AlsoPack && State.VF.isVector()) {
9806 // If we're constructing lane 0, initialize to start from poison.
9807 if (State.Instance->Lane.isFirstLane()) {
9808 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9809 Value *Poison = PoisonValue::get(
9810 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9811 State.set(this, Poison, State.Instance->Part);
9812 }
9813 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9814 }
9815 return;
9816 }
9817
9818 // Generate scalar instances for all VF lanes of all UF parts, unless the
9819 // instruction is uniform in which case generate only the first lane for each
9820 // of the UF parts.
9821 unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 9822 assert((!State.VF.isScalable() || IsUniform) && 9823 "Can't scalarize a scalable vector"); 9824 for (unsigned Part = 0; Part < State.UF; ++Part) 9825 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9826 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9827 VPIteration(Part, Lane), IsPredicated, 9828 State); 9829 } 9830 9831 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9832 assert(State.Instance && "Branch on Mask works only on single instance."); 9833 9834 unsigned Part = State.Instance->Part; 9835 unsigned Lane = State.Instance->Lane.getKnownLane(); 9836 9837 Value *ConditionBit = nullptr; 9838 VPValue *BlockInMask = getMask(); 9839 if (BlockInMask) { 9840 ConditionBit = State.get(BlockInMask, Part); 9841 if (ConditionBit->getType()->isVectorTy()) 9842 ConditionBit = State.Builder.CreateExtractElement( 9843 ConditionBit, State.Builder.getInt32(Lane)); 9844 } else // Block in mask is all-one. 9845 ConditionBit = State.Builder.getTrue(); 9846 9847 // Replace the temporary unreachable terminator with a new conditional branch, 9848 // whose two destinations will be set later when they are created. 9849 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9850 assert(isa<UnreachableInst>(CurrentTerminator) && 9851 "Expected to replace unreachable terminator with conditional branch."); 9852 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9853 CondBr->setSuccessor(0, nullptr); 9854 ReplaceInstWithInst(CurrentTerminator, CondBr); 9855 } 9856 9857 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9858 assert(State.Instance && "Predicated instruction PHI works per instance."); 9859 Instruction *ScalarPredInst = 9860 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9861 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9862 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9863 assert(PredicatingBB && "Predicated block has no single predecessor."); 9864 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9865 "operand must be VPReplicateRecipe"); 9866 9867 // By current pack/unpack logic we need to generate only a single phi node: if 9868 // a vector value for the predicated instruction exists at this point it means 9869 // the instruction has vector users only, and a phi for the vector value is 9870 // needed. In this case the recipe of the predicated instruction is marked to 9871 // also do that packing, thereby "hoisting" the insert-element sequence. 9872 // Otherwise, a phi node for the scalar value is needed. 9873 unsigned Part = State.Instance->Part; 9874 if (State.hasVectorValue(getOperand(0), Part)) { 9875 Value *VectorValue = State.get(getOperand(0), Part); 9876 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9877 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9878 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9879 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9880 if (State.hasVectorValue(this, Part)) 9881 State.reset(this, VPhi, Part); 9882 else 9883 State.set(this, VPhi, Part); 9884 // NOTE: Currently we need to update the value of the operand, so the next 9885 // predicated iteration inserts its generated value in the correct vector. 
9886 State.reset(getOperand(0), VPhi, Part); 9887 } else { 9888 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9889 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9890 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9891 PredicatingBB); 9892 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9893 if (State.hasScalarValue(this, *State.Instance)) 9894 State.reset(this, Phi, *State.Instance); 9895 else 9896 State.set(this, Phi, *State.Instance); 9897 // NOTE: Currently we need to update the value of the operand, so the next 9898 // predicated iteration inserts its generated value in the correct vector. 9899 State.reset(getOperand(0), Phi, *State.Instance); 9900 } 9901 } 9902 9903 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9904 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9905 9906 // Attempt to issue a wide load. 9907 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9908 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9909 9910 assert((LI || SI) && "Invalid Load/Store instruction"); 9911 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9912 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9913 9914 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9915 9916 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9917 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9918 bool CreateGatherScatter = !Consecutive; 9919 9920 auto &Builder = State.Builder; 9921 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9922 bool isMaskRequired = getMask(); 9923 if (isMaskRequired) 9924 for (unsigned Part = 0; Part < State.UF; ++Part) 9925 BlockInMaskParts[Part] = State.get(getMask(), Part); 9926 9927 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9928 // Calculate the pointer for the specific unroll-part. 9929 GetElementPtrInst *PartPtr = nullptr; 9930 9931 bool InBounds = false; 9932 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9933 InBounds = gep->isInBounds(); 9934 if (Reverse) { 9935 // If the address is consecutive but reversed, then the 9936 // wide store needs to start at the last vector element. 9937 // RunTimeVF = VScale * VF.getKnownMinValue() 9938 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9939 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9940 // NumElt = -Part * RunTimeVF 9941 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9942 // LastLane = 1 - RunTimeVF 9943 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9944 PartPtr = 9945 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9946 PartPtr->setIsInBounds(InBounds); 9947 PartPtr = cast<GetElementPtrInst>( 9948 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9949 PartPtr->setIsInBounds(InBounds); 9950 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
9951 BlockInMaskParts[Part] = 9952 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9953 } else { 9954 Value *Increment = 9955 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9956 PartPtr = cast<GetElementPtrInst>( 9957 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9958 PartPtr->setIsInBounds(InBounds); 9959 } 9960 9961 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9962 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9963 }; 9964 9965 // Handle Stores: 9966 if (SI) { 9967 State.ILV->setDebugLocFromInst(SI); 9968 9969 for (unsigned Part = 0; Part < State.UF; ++Part) { 9970 Instruction *NewSI = nullptr; 9971 Value *StoredVal = State.get(StoredValue, Part); 9972 if (CreateGatherScatter) { 9973 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9974 Value *VectorGep = State.get(getAddr(), Part); 9975 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9976 MaskPart); 9977 } else { 9978 if (Reverse) { 9979 // If we store to reverse consecutive memory locations, then we need 9980 // to reverse the order of elements in the stored value. 9981 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 9982 // We don't want to update the value in the map as it might be used in 9983 // another expression. So don't call resetVectorValue(StoredVal). 9984 } 9985 auto *VecPtr = 9986 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 9987 if (isMaskRequired) 9988 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 9989 BlockInMaskParts[Part]); 9990 else 9991 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 9992 } 9993 State.ILV->addMetadata(NewSI, SI); 9994 } 9995 return; 9996 } 9997 9998 // Handle loads. 9999 assert(LI && "Must have a load instruction"); 10000 State.ILV->setDebugLocFromInst(LI); 10001 for (unsigned Part = 0; Part < State.UF; ++Part) { 10002 Value *NewLI; 10003 if (CreateGatherScatter) { 10004 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10005 Value *VectorGep = State.get(getAddr(), Part); 10006 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10007 nullptr, "wide.masked.gather"); 10008 State.ILV->addMetadata(NewLI, LI); 10009 } else { 10010 auto *VecPtr = 10011 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10012 if (isMaskRequired) 10013 NewLI = Builder.CreateMaskedLoad( 10014 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10015 PoisonValue::get(DataTy), "wide.masked.load"); 10016 else 10017 NewLI = 10018 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10019 10020 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10021 State.ILV->addMetadata(NewLI, LI); 10022 if (Reverse) 10023 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10024 } 10025 10026 State.set(this, NewLI, Part); 10027 } 10028 } 10029 10030 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10031 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10032 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10033 // for predication. 
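// For example (illustrative, mirroring the cases handled below): a function
// carrying the optsize attribute maps to CM_ScalarEpilogueNotAllowedOptSize,
// an explicit PreferPredicateTy::PredicateElseScalarEpilogue option maps to
// CM_ScalarEpilogueNotNeededUsePredicate, and when neither the hints nor the
// TTI hook ask for predication the result is CM_ScalarEpilogueAllowed.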
10034 static ScalarEpilogueLowering getScalarEpilogueLowering( 10035 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10036 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10037 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10038 LoopVectorizationLegality &LVL) { 10039 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10040 // don't look at hints or options, and don't request a scalar epilogue. 10041 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10042 // LoopAccessInfo (due to code dependency and not being able to reliably get 10043 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10044 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10045 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10046 // back to the old way and vectorize with versioning when forced. See D81345.) 10047 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10048 PGSOQueryType::IRPass) && 10049 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10050 return CM_ScalarEpilogueNotAllowedOptSize; 10051 10052 // 2) If set, obey the directives 10053 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10054 switch (PreferPredicateOverEpilogue) { 10055 case PreferPredicateTy::ScalarEpilogue: 10056 return CM_ScalarEpilogueAllowed; 10057 case PreferPredicateTy::PredicateElseScalarEpilogue: 10058 return CM_ScalarEpilogueNotNeededUsePredicate; 10059 case PreferPredicateTy::PredicateOrDontVectorize: 10060 return CM_ScalarEpilogueNotAllowedUsePredicate; 10061 }; 10062 } 10063 10064 // 3) If set, obey the hints 10065 switch (Hints.getPredicate()) { 10066 case LoopVectorizeHints::FK_Enabled: 10067 return CM_ScalarEpilogueNotNeededUsePredicate; 10068 case LoopVectorizeHints::FK_Disabled: 10069 return CM_ScalarEpilogueAllowed; 10070 }; 10071 10072 // 4) if the TTI hook indicates this is profitable, request predication. 10073 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10074 LVL.getLAI())) 10075 return CM_ScalarEpilogueNotNeededUsePredicate; 10076 10077 return CM_ScalarEpilogueAllowed; 10078 } 10079 10080 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10081 // If Values have been set for this Def return the one relevant for \p Part. 10082 if (hasVectorValue(Def, Part)) 10083 return Data.PerPartOutput[Def][Part]; 10084 10085 if (!hasScalarValue(Def, {Part, 0})) { 10086 Value *IRV = Def->getLiveInIRValue(); 10087 Value *B = ILV->getBroadcastInstrs(IRV); 10088 set(Def, B, Part); 10089 return B; 10090 } 10091 10092 Value *ScalarValue = get(Def, {Part, 0}); 10093 // If we aren't vectorizing, we can just copy the scalar map values over 10094 // to the vector map. 10095 if (VF.isScalar()) { 10096 set(Def, ScalarValue, Part); 10097 return ScalarValue; 10098 } 10099 10100 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10101 bool IsUniform = RepR && RepR->isUniform(); 10102 10103 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10104 // Check if there is a scalar value for the selected lane. 10105 if (!hasScalarValue(Def, {Part, LastLane})) { 10106 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
10107 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) || 10108 isa<VPScalarIVStepsRecipe>(Def->getDef())) && 10109 "unexpected recipe found to be invariant"); 10110 IsUniform = true; 10111 LastLane = 0; 10112 } 10113 10114 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10115 // Set the insert point after the last scalarized instruction or after the 10116 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10117 // will directly follow the scalar definitions. 10118 auto OldIP = Builder.saveIP(); 10119 auto NewIP = 10120 isa<PHINode>(LastInst) 10121 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10122 : std::next(BasicBlock::iterator(LastInst)); 10123 Builder.SetInsertPoint(&*NewIP); 10124 10125 // However, if we are vectorizing, we need to construct the vector values. 10126 // If the value is known to be uniform after vectorization, we can just 10127 // broadcast the scalar value corresponding to lane zero for each unroll 10128 // iteration. Otherwise, we construct the vector values using 10129 // insertelement instructions. Since the resulting vectors are stored in 10130 // State, we will only generate the insertelements once. 10131 Value *VectorValue = nullptr; 10132 if (IsUniform) { 10133 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10134 set(Def, VectorValue, Part); 10135 } else { 10136 // Initialize packing with insertelements to start from undef. 10137 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10138 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10139 set(Def, Undef, Part); 10140 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10141 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10142 VectorValue = get(Def, Part); 10143 } 10144 Builder.restoreIP(OldIP); 10145 return VectorValue; 10146 } 10147 10148 // Process the loop in the VPlan-native vectorization path. This path builds 10149 // VPlan upfront in the vectorization pipeline, which allows to apply 10150 // VPlan-to-VPlan transformations from the very beginning without modifying the 10151 // input LLVM IR. 10152 static bool processLoopInVPlanNativePath( 10153 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10154 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10155 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10156 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10157 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10158 LoopVectorizationRequirements &Requirements) { 10159 10160 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10161 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10162 return false; 10163 } 10164 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10165 Function *F = L->getHeader()->getParent(); 10166 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10167 10168 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10169 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10170 10171 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10172 &Hints, IAI); 10173 // Use the planner for outer loop vectorization. 10174 // TODO: CM is not used at this point inside the planner. Turn CM into an 10175 // optional argument if we don't need it in the future. 10176 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10177 Requirements, ORE); 10178 10179 // Get user vectorization factor. 
10180 ElementCount UserVF = Hints.getWidth(); 10181 10182 CM.collectElementTypesForWidening(); 10183 10184 // Plan how to best vectorize, return the best VF and its cost. 10185 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10186 10187 // If we are stress testing VPlan builds, do not attempt to generate vector 10188 // code. Masked vector code generation support will follow soon. 10189 // Also, do not attempt to vectorize if no vector code will be produced. 10190 if (VPlanBuildStressTest || EnableVPlanPredication || 10191 VectorizationFactor::Disabled() == VF) 10192 return false; 10193 10194 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10195 10196 { 10197 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10198 F->getParent()->getDataLayout()); 10199 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10200 &CM, BFI, PSI, Checks); 10201 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10202 << L->getHeader()->getParent()->getName() << "\"\n"); 10203 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10204 } 10205 10206 // Mark the loop as already vectorized to avoid vectorizing again. 10207 Hints.setAlreadyVectorized(); 10208 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10209 return true; 10210 } 10211 10212 // Emit a remark if there are stores to floats that required a floating point 10213 // extension. If the vectorized loop was generated with floating point there 10214 // will be a performance penalty from the conversion overhead and the change in 10215 // the vector width. 10216 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10217 SmallVector<Instruction *, 4> Worklist; 10218 for (BasicBlock *BB : L->getBlocks()) { 10219 for (Instruction &Inst : *BB) { 10220 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10221 if (S->getValueOperand()->getType()->isFloatTy()) 10222 Worklist.push_back(S); 10223 } 10224 } 10225 } 10226 10227 // Traverse the floating point stores upwards searching, for floating point 10228 // conversions. 10229 SmallPtrSet<const Instruction *, 4> Visited; 10230 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10231 while (!Worklist.empty()) { 10232 auto *I = Worklist.pop_back_val(); 10233 if (!L->contains(I)) 10234 continue; 10235 if (!Visited.insert(I).second) 10236 continue; 10237 10238 // Emit a remark if the floating point store required a floating 10239 // point conversion. 10240 // TODO: More work could be done to identify the root cause such as a 10241 // constant or a function return type and point the user to it. 10242 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10243 ORE->emit([&]() { 10244 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10245 I->getDebugLoc(), L->getHeader()) 10246 << "floating point conversion changes vector width. " 10247 << "Mixed floating point precision requires an up/down " 10248 << "cast that will negatively impact performance."; 10249 }); 10250 10251 for (Use &Op : I->operands()) 10252 if (auto *OpI = dyn_cast<Instruction>(Op)) 10253 Worklist.push_back(OpI); 10254 } 10255 } 10256 10257 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10258 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10259 !EnableLoopInterleaving), 10260 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10261 !EnableLoopVectorization) {} 10262 10263 bool LoopVectorizePass::processLoop(Loop *L) { 10264 assert((EnableVPlanNativePath || L->isInnermost()) && 10265 "VPlan-native path is not enabled. 
Only process inner loops."); 10266 10267 #ifndef NDEBUG 10268 const std::string DebugLocStr = getDebugLocString(L); 10269 #endif /* NDEBUG */ 10270 10271 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10272 << L->getHeader()->getParent()->getName() << "' from " 10273 << DebugLocStr << "\n"); 10274 10275 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10276 10277 LLVM_DEBUG( 10278 dbgs() << "LV: Loop hints:" 10279 << " force=" 10280 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10281 ? "disabled" 10282 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10283 ? "enabled" 10284 : "?")) 10285 << " width=" << Hints.getWidth() 10286 << " interleave=" << Hints.getInterleave() << "\n"); 10287 10288 // Function containing loop 10289 Function *F = L->getHeader()->getParent(); 10290 10291 // Looking at the diagnostic output is the only way to determine if a loop 10292 // was vectorized (other than looking at the IR or machine code), so it 10293 // is important to generate an optimization remark for each loop. Most of 10294 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10295 // generated as OptimizationRemark and OptimizationRemarkMissed are 10296 // less verbose reporting vectorized loops and unvectorized loops that may 10297 // benefit from vectorization, respectively. 10298 10299 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10300 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10301 return false; 10302 } 10303 10304 PredicatedScalarEvolution PSE(*SE, *L); 10305 10306 // Check if it is legal to vectorize the loop. 10307 LoopVectorizationRequirements Requirements; 10308 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10309 &Requirements, &Hints, DB, AC, BFI, PSI); 10310 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10311 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10312 Hints.emitRemarkWithHints(); 10313 return false; 10314 } 10315 10316 // Check the function attributes and profiles to find out if this function 10317 // should be optimized for size. 10318 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10319 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10320 10321 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10322 // here. They may require CFG and instruction level transformations before 10323 // even evaluating whether vectorization is profitable. Since we cannot modify 10324 // the incoming IR, we need to build VPlan upfront in the vectorization 10325 // pipeline. 10326 if (!L->isInnermost()) 10327 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10328 ORE, BFI, PSI, Hints, Requirements); 10329 10330 assert(L->isInnermost() && "Inner loop expected."); 10331 10332 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10333 // count by optimizing for size, to minimize overheads. 10334 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10335 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10336 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10337 << "This loop is worth vectorizing only if no scalar " 10338 << "iteration overheads are incurred."); 10339 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10340 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10341 else { 10342 LLVM_DEBUG(dbgs() << "\n"); 10343 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10344 } 10345 } 10346 10347 // Check the function attributes to see if implicit floats are allowed. 10348 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10349 // an integer loop and the vector instructions selected are purely integer 10350 // vector instructions? 10351 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10352 reportVectorizationFailure( 10353 "Can't vectorize when the NoImplicitFloat attribute is used", 10354 "loop not vectorized due to NoImplicitFloat attribute", 10355 "NoImplicitFloat", ORE, L); 10356 Hints.emitRemarkWithHints(); 10357 return false; 10358 } 10359 10360 // Check if the target supports potentially unsafe FP vectorization. 10361 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10362 // for the target we're vectorizing for, to make sure none of the 10363 // additional fp-math flags can help. 10364 if (Hints.isPotentiallyUnsafe() && 10365 TTI->isFPVectorizationPotentiallyUnsafe()) { 10366 reportVectorizationFailure( 10367 "Potentially unsafe FP op prevents vectorization", 10368 "loop not vectorized due to unsafe FP support.", 10369 "UnsafeFP", ORE, L); 10370 Hints.emitRemarkWithHints(); 10371 return false; 10372 } 10373 10374 bool AllowOrderedReductions; 10375 // If the flag is set, use that instead and override the TTI behaviour. 10376 if (ForceOrderedReductions.getNumOccurrences() > 0) 10377 AllowOrderedReductions = ForceOrderedReductions; 10378 else 10379 AllowOrderedReductions = TTI->enableOrderedReductions(); 10380 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10381 ORE->emit([&]() { 10382 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10383 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10384 ExactFPMathInst->getDebugLoc(), 10385 ExactFPMathInst->getParent()) 10386 << "loop not vectorized: cannot prove it is safe to reorder " 10387 "floating-point operations"; 10388 }); 10389 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10390 "reorder floating-point operations\n"); 10391 Hints.emitRemarkWithHints(); 10392 return false; 10393 } 10394 10395 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10396 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10397 10398 // If an override option has been passed in for interleaved accesses, use it. 10399 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10400 UseInterleaved = EnableInterleavedMemAccesses; 10401 10402 // Analyze interleaved memory accesses. 10403 if (UseInterleaved) { 10404 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10405 } 10406 10407 // Use the cost model. 10408 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10409 F, &Hints, IAI); 10410 CM.collectValuesToIgnore(); 10411 CM.collectElementTypesForWidening(); 10412 10413 // Use the planner for vectorization. 10414 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10415 Requirements, ORE); 10416 10417 // Get user vectorization factor and interleave count. 
10418 ElementCount UserVF = Hints.getWidth();
10419 unsigned UserIC = Hints.getInterleave();
10420
10421 // Plan how to best vectorize, return the best VF and its cost.
10422 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10423
10424 VectorizationFactor VF = VectorizationFactor::Disabled();
10425 unsigned IC = 1;
10426
10427 if (MaybeVF) {
10428 VF = *MaybeVF;
10429 // Select the interleave count.
10430 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10431 }
10432
10433 // Identify the diagnostic messages that should be produced.
10434 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10435 bool VectorizeLoop = true, InterleaveLoop = true;
10436 if (VF.Width.isScalar()) {
10437 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10438 VecDiagMsg = std::make_pair(
10439 "VectorizationNotBeneficial",
10440 "the cost-model indicates that vectorization is not beneficial");
10441 VectorizeLoop = false;
10442 }
10443
10444 if (!MaybeVF && UserIC > 1) {
10445 // Tell the user interleaving was avoided up-front, despite being explicitly
10446 // requested.
10447 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10448 "interleaving should be avoided up front\n");
10449 IntDiagMsg = std::make_pair(
10450 "InterleavingAvoided",
10451 "Ignoring UserIC, because interleaving was avoided up front");
10452 InterleaveLoop = false;
10453 } else if (IC == 1 && UserIC <= 1) {
10454 // Tell the user interleaving is not beneficial.
10455 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10456 IntDiagMsg = std::make_pair(
10457 "InterleavingNotBeneficial",
10458 "the cost-model indicates that interleaving is not beneficial");
10459 InterleaveLoop = false;
10460 if (UserIC == 1) {
10461 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10462 IntDiagMsg.second +=
10463 " and is explicitly disabled or interleave count is set to 1";
10464 }
10465 } else if (IC > 1 && UserIC == 1) {
10466 // Tell the user interleaving is beneficial, but it is explicitly disabled.
10467 LLVM_DEBUG(
10468 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10469 IntDiagMsg = std::make_pair(
10470 "InterleavingBeneficialButDisabled",
10471 "the cost-model indicates that interleaving is beneficial "
10472 "but is explicitly disabled or interleave count is set to 1");
10473 InterleaveLoop = false;
10474 }
10475
10476 // Override IC if the user provided an interleave count.
10477 IC = UserIC > 0 ? UserIC : IC;
10478
10479 // Emit diagnostic messages, if any.
10480 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10481 if (!VectorizeLoop && !InterleaveLoop) {
10482 // Do not vectorize or interleave the loop.
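// Both missed-optimization remarks are emitted so that a build run with
// remarks enabled (e.g. -Rpass-missed=loop-vectorize) reports why neither
// transformation was applied.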
10483 ORE->emit([&]() { 10484 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10485 L->getStartLoc(), L->getHeader()) 10486 << VecDiagMsg.second; 10487 }); 10488 ORE->emit([&]() { 10489 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10490 L->getStartLoc(), L->getHeader()) 10491 << IntDiagMsg.second; 10492 }); 10493 return false; 10494 } else if (!VectorizeLoop && InterleaveLoop) { 10495 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10496 ORE->emit([&]() { 10497 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10498 L->getStartLoc(), L->getHeader()) 10499 << VecDiagMsg.second; 10500 }); 10501 } else if (VectorizeLoop && !InterleaveLoop) { 10502 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10503 << ") in " << DebugLocStr << '\n'); 10504 ORE->emit([&]() { 10505 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10506 L->getStartLoc(), L->getHeader()) 10507 << IntDiagMsg.second; 10508 }); 10509 } else if (VectorizeLoop && InterleaveLoop) { 10510 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10511 << ") in " << DebugLocStr << '\n'); 10512 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10513 } 10514 10515 bool DisableRuntimeUnroll = false; 10516 MDNode *OrigLoopID = L->getLoopID(); 10517 { 10518 // Optimistically generate runtime checks. Drop them if they turn out to not 10519 // be profitable. Limit the scope of Checks, so the cleanup happens 10520 // immediately after vector codegeneration is done. 10521 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10522 F->getParent()->getDataLayout()); 10523 if (!VF.Width.isScalar() || IC > 1) 10524 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate()); 10525 10526 using namespace ore; 10527 if (!VectorizeLoop) { 10528 assert(IC > 1 && "interleave count should not be 1 or 0"); 10529 // If we decided that it is not legal to vectorize the loop, then 10530 // interleave it. 10531 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10532 &CM, BFI, PSI, Checks); 10533 10534 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10535 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10536 10537 ORE->emit([&]() { 10538 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10539 L->getHeader()) 10540 << "interleaved loop (interleaved count: " 10541 << NV("InterleaveCount", IC) << ")"; 10542 }); 10543 } else { 10544 // If we decided that it is *legal* to vectorize the loop, then do it. 10545 10546 // Consider vectorizing the epilogue too if it's profitable. 10547 VectorizationFactor EpilogueVF = 10548 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10549 if (EpilogueVF.Width.isVector()) { 10550 10551 // The first pass vectorizes the main loop and creates a scalar epilogue 10552 // to be vectorized by executing the plan (potentially with a different 10553 // factor) again shortly afterwards. 10554 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10555 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10556 EPI, &LVL, &CM, BFI, PSI, Checks); 10557 10558 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10559 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10560 DT); 10561 ++LoopsVectorized; 10562 10563 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10564 formLCSSARecursively(*L, *DT, LI, SE); 10565 10566 // Second pass vectorizes the epilogue and adjusts the control flow 10567 // edges from the first pass. 
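// Illustrative example (made-up factors): if the main loop was vectorized with
// VF = 8 and UF = 2 and the epilogue VF chosen above is 4, the assignments
// below re-run codegen with MainLoopVF = 4 and MainLoopUF = 1 (the epilogue UF
// passed when EPI was constructed).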
10568 EPI.MainLoopVF = EPI.EpilogueVF; 10569 EPI.MainLoopUF = EPI.EpilogueUF; 10570 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10571 ORE, EPI, &LVL, &CM, BFI, PSI, 10572 Checks); 10573 10574 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10575 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->setName( 10576 "vec.epilog.vector.body"); 10577 10578 // Ensure that the start values for any VPReductionPHIRecipes are 10579 // updated before vectorising the epilogue loop. 10580 VPBasicBlock *Header = 10581 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock(); 10582 for (VPRecipeBase &R : Header->phis()) { 10583 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10584 if (auto *Resume = MainILV.getReductionResumeValue( 10585 ReductionPhi->getRecurrenceDescriptor())) { 10586 VPValue *StartVal = new VPValue(Resume); 10587 BestEpiPlan.addExternalDef(StartVal); 10588 ReductionPhi->setOperand(0, StartVal); 10589 } 10590 } 10591 } 10592 10593 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10594 DT); 10595 ++LoopsEpilogueVectorized; 10596 10597 if (!MainILV.areSafetyChecksAdded()) 10598 DisableRuntimeUnroll = true; 10599 } else { 10600 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10601 &LVL, &CM, BFI, PSI, Checks); 10602 10603 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10604 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10605 ++LoopsVectorized; 10606 10607 // Add metadata to disable runtime unrolling a scalar loop when there 10608 // are no runtime checks about strides and memory. A scalar loop that is 10609 // rarely used is not worth unrolling. 10610 if (!LB.areSafetyChecksAdded()) 10611 DisableRuntimeUnroll = true; 10612 } 10613 // Report the vectorization decision. 10614 ORE->emit([&]() { 10615 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10616 L->getHeader()) 10617 << "vectorized loop (vectorization width: " 10618 << NV("VectorizationFactor", VF.Width) 10619 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10620 }); 10621 } 10622 10623 if (ORE->allowExtraAnalysis(LV_NAME)) 10624 checkMixedPrecision(L, ORE); 10625 } 10626 10627 Optional<MDNode *> RemainderLoopID = 10628 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10629 LLVMLoopVectorizeFollowupEpilogue}); 10630 if (RemainderLoopID.hasValue()) { 10631 L->setLoopID(RemainderLoopID.getValue()); 10632 } else { 10633 if (DisableRuntimeUnroll) 10634 AddRuntimeUnrollDisableMetaData(L); 10635 10636 // Mark the loop as already vectorized to avoid vectorizing again. 10637 Hints.setAlreadyVectorized(); 10638 } 10639 10640 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10641 return true; 10642 } 10643 10644 LoopVectorizeResult LoopVectorizePass::runImpl( 10645 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10646 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10647 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10648 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10649 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10650 SE = &SE_; 10651 LI = &LI_; 10652 TTI = &TTI_; 10653 DT = &DT_; 10654 BFI = &BFI_; 10655 TLI = TLI_; 10656 AA = &AA_; 10657 AC = &AC_; 10658 GetLAA = &GetLAA_; 10659 DB = &DB_; 10660 ORE = &ORE_; 10661 PSI = PSI_; 10662 10663 // Don't attempt if 10664 // 1. the target claims to have no vector registers, and 10665 // 2. interleaving won't help ILP. 
10666 //
10667 // The second condition is necessary because, even if the target has no
10668 // vector registers, loop vectorization may still enable scalar
10669 // interleaving.
10670 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10671 TTI->getMaxInterleaveFactor(1) < 2)
10672 return LoopVectorizeResult(false, false);
10673
10674 bool Changed = false, CFGChanged = false;
10675
10676 // The vectorizer requires loops to be in simplified form.
10677 // Since simplification may add new inner loops, it has to run before the
10678 // legality and profitability checks. This means running the loop vectorizer
10679 // will simplify all loops, regardless of whether anything ends up being
10680 // vectorized.
10681 for (auto &L : *LI)
10682 Changed |= CFGChanged |=
10683 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10684
10685 // Build up a worklist of inner loops to vectorize. This is necessary as
10686 // the act of vectorizing or partially unrolling a loop creates new loops
10687 // and can invalidate iterators across the loops.
10688 SmallVector<Loop *, 8> Worklist;
10689
10690 for (Loop *L : *LI)
10691 collectSupportedLoops(*L, LI, ORE, Worklist);
10692
10693 LoopsAnalyzed += Worklist.size();
10694
10695 // Now walk the identified inner loops.
10696 while (!Worklist.empty()) {
10697 Loop *L = Worklist.pop_back_val();
10698
10699 // For the inner loops we actually process, form LCSSA to simplify the
10700 // transform.
10701 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10702
10703 Changed |= CFGChanged |= processLoop(L);
10704 }
10705
10706 // Process each loop nest in the function.
10707 return LoopVectorizeResult(Changed, CFGChanged);
10708 }
10709
10710 PreservedAnalyses LoopVectorizePass::run(Function &F,
10711 FunctionAnalysisManager &AM) {
10712 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10713 auto &LI = AM.getResult<LoopAnalysis>(F);
10714 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10715 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10716 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10717 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10718 auto &AA = AM.getResult<AAManager>(F);
10719 auto &AC = AM.getResult<AssumptionAnalysis>(F);
10720 auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10721 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10722
10723 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10724 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10725 [&](Loop &L) -> const LoopAccessInfo & {
10726 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
10727 TLI, TTI, nullptr, nullptr, nullptr};
10728 return LAM.getResult<LoopAccessAnalysis>(L, AR);
10729 };
10730 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10731 ProfileSummaryInfo *PSI =
10732 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10733 LoopVectorizeResult Result =
10734 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10735 if (!Result.MadeAnyChange)
10736 return PreservedAnalyses::all();
10737 PreservedAnalyses PA;
10738
10739 // We currently do not preserve the LoopInfo/DominatorTree analyses with outer
10740 // loop vectorization. Until this is addressed, mark these analyses as
10741 // preserved only for the non-VPlan-native path.
10742 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10743 if (!EnableVPlanNativePath) {
10744 PA.preserve<LoopAnalysis>();
10745 PA.preserve<DominatorTreeAnalysis>();
10746 }
10747
10748 if (Result.MadeCFGChange) {
10749 // Making CFG changes likely means a loop got vectorized. Indicate that
10750 // extra simplification passes should be run.
10751 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10752 // be run if runtime checks have been added.
10753 AM.getResult<ShouldRunExtraVectorPasses>(F);
10754 PA.preserve<ShouldRunExtraVectorPasses>();
10755 } else {
10756 PA.preserveSet<CFGAnalyses>();
10757 }
10758 return PA;
10759 }
10760
10761 void LoopVectorizePass::printPipeline(
10762 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10763 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10764 OS, MapClassName2PassName);
10765
10766 OS << "<";
10767 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10768 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10769 OS << ">";
10770 }
10771
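// For reference (illustrative): with the default options this prints roughly
// "loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>" when
// the pass pipeline is dumped textually.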