//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
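//
// As an illustrative sketch of the transformation (pseudo-code, not the
// actual output of this pass), for a vectorization factor of 4 a scalar loop
// such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten so that each 'wide' iteration processes four
// elements at once, with any remaining iterations handled by a scalar
// epilogue loop:
//
//   int i = 0;
//   for (; i + 4 <= n; i += 4)
//     a[i:i+4] = b[i:i+4] + c[i:i+4]; // one SIMD instruction per operation
//   for (; i < n; ++i)                // scalar epilogue (remainder)
//     a[i] = b[i] + c[i];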
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));
// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; this enum lists the available
// strategies. I.e., the vectorizer will try to fold the tail loop (epilogue)
// into the vector body and predicate the instructions accordingly. If
// tail-folding fails, the fallback strategy depends on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
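// As an illustrative sketch of tail-folding (pseudo-code, assuming a VF of
// 4): instead of emitting a separate scalar remainder loop, the vector body
// executes ceil(n / 4) wide iterations and masks off the excess lanes in the
// final iteration:
//
//   for (int i = 0; i < n; i += 4) {
//     mask = {i+0 < n, i+1 < n, i+2 < n, i+3 < n};
//     a[i:i+4] = b[i:i+4] + c[i:i+4]; // under mask; inactive lanes do nothing
//   }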
static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));
// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
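// For example, on targets where x86_fp80 occupies 80 bits but is allocated
// 96 or 128 bits for alignment, an [N x x86_fp80] array has padding between
// elements and is therefore not bitcast-compatible with <N x x86_fp80>, so
// the check above reports the type as irregular.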
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop for a given
/// vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and
  /// more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }
  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a scalar instance for the part and lane given by \p
  /// Instance. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None, the class member Builder is used.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;
  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Introduce a conditional branch (on true, condition to be set later) at
  /// the end of the header=latch connecting it to itself (across the backedge)
  /// and to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);
  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---
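  // As a simplified sketch (runtime SCEV/memory check blocks omitted), the
  // blocks named below are connected roughly as follows:
  //
  //   [iter count check] -> [vector preheader] -> [vector loop]
  //           |                                        |
  //           |                                        v
  //           +----------> [scalar preheader] <- [middle block]
  //                               |                    |
  //                               v                    v
  //                         [scalar loop] ------> [exit block]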
  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle block between the vector and the scalar loops.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};
/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice: first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton() final {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};
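// As an illustrative example of the resulting iteration split: with
// MainLoopVF = 4, EpilogueVF = 2 and both UFs equal to 1, a trip count of 11
// executes 8 iterations in the main vector loop, 2 in the vectorized
// epilogue loop, and the final 1 in the scalar remainder loop.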
/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *> createEpilogueVectorizedLoopSkeleton() final;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *> createEpilogueVectorizedLoopSkeleton() final;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}
void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
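// For example, with Step = 2 and VF = <vscale x 4>, createStepForVF emits
// (conceptually) the IR
//   %vscale = call i64 @llvm.vscale.i64()
//   %step   = mul i64 %vscale, 8
// whereas for a fixed VF of 4 it folds to the constant 8 (= 2 * 4).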
/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm
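// The reporting helpers above are typically invoked as follows (an
// illustrative call; the tag and messages here are hypothetical):
//   reportVectorizationFailure("Unsafe memory dependence",
//                              "unsafe dependent memory operations in loop",
//                              "UnsafeDep", ORE, TheLoop, nullptr);
// This prints "LV: Not vectorizing: Unsafe memory dependence." to the debug
// stream and emits a remark reading "loop not vectorized: unsafe dependent
// memory operations in loop" at the loop's location.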
#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {
  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect the recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}
namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
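// For example, this comparator orders the candidate VFs
// {4, vscale x 1, 2, vscale x 4} as {2, 4, vscale x 1, vscale x 4}: all
// fixed counts precede all scalable ones, and each group is sorted by its
// known minimum value.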
/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on its cost. This
  /// function takes cost-based decisions for Load/Store instructions and
  /// collects them in a map. This decision map is used for building the lists
  /// of loop-uniform and loop-scalar instructions. The calculated cost is
  /// saved with the widening decision in order to avoid redundant
  /// calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the IsOrdered flag of RdxDesc is set and we do
  /// not allow reordering of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }
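  // As an illustrative case: a udiv appearing in a block that requires
  // predication may be cheaper to scalarize (emitting per-lane, branch-guarded
  // scalar divisions) than to execute unconditionally as a vector division,
  // since the vector form would run the division on lanes whose mask is false.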
1353 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1354 if (VF.isScalar())
1355 return true;
1356
1357 // Cost model is not run in the VPlan-native path - return conservative
1358 // result until this changes.
1359 if (EnableVPlanNativePath)
1360 return false;
1361
1362 auto UniformsPerVF = Uniforms.find(VF);
1363 assert(UniformsPerVF != Uniforms.end() &&
1364 "VF not yet analyzed for uniformity");
1365 return UniformsPerVF->second.count(I);
1366 }
1367
1368 /// Returns true if \p I is known to be scalar after vectorization.
1369 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1370 if (VF.isScalar())
1371 return true;
1372
1373 // Cost model is not run in the VPlan-native path - return conservative
1374 // result until this changes.
1375 if (EnableVPlanNativePath)
1376 return false;
1377
1378 auto ScalarsPerVF = Scalars.find(VF);
1379 assert(ScalarsPerVF != Scalars.end() &&
1380 "Scalar values are not calculated for VF");
1381 return ScalarsPerVF->second.count(I);
1382 }
1383
1384 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1385 /// for vectorization factor \p VF.
1386 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1387 return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1388 !isProfitableToScalarize(I, VF) &&
1389 !isScalarAfterVectorization(I, VF);
1390 }
1391
1392 /// Decision that was taken during cost calculation for memory instruction.
1393 enum InstWidening {
1394 CM_Unknown,
1395 CM_Widen, // For consecutive accesses with stride +1.
1396 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1397 CM_Interleave,
1398 CM_GatherScatter,
1399 CM_Scalarize
1400 };
1401
1402 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1403 /// instruction \p I and vector width \p VF.
1404 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1405 InstructionCost Cost) {
1406 assert(VF.isVector() && "Expected VF >=2");
1407 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1408 }
1409
1410 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1411 /// interleaving group \p Grp and vector width \p VF.
1412 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1413 ElementCount VF, InstWidening W,
1414 InstructionCost Cost) {
1415 assert(VF.isVector() && "Expected VF >=2");
1416 // Broadcast this decision to all instructions inside the group.
1417 // But the cost will be assigned to one instruction only.
1418 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1419 if (auto *I = Grp->getMember(i)) {
1420 if (Grp->getInsertPos() == I)
1421 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1422 else
1423 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1424 }
1425 }
1426 }
1427
1428 /// Return the cost model decision for the given instruction \p I and vector
1429 /// width \p VF. Return CM_Unknown if this instruction did not pass
1430 /// through the cost modeling.
1431 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1432 assert(VF.isVector() && "Expected VF to be a vector VF");
1433 // Cost model is not run in the VPlan-native path - return conservative
1434 // result until this changes.
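// (Returning CM_GatherScatter is the conservative choice here: a gather or
// scatter can handle any memory access pattern, though usually at a higher
// cost than the other widening kinds.)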
1435 if (EnableVPlanNativePath)
1436 return CM_GatherScatter;
1437
1438 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1439 auto Itr = WideningDecisions.find(InstOnVF);
1440 if (Itr == WideningDecisions.end())
1441 return CM_Unknown;
1442 return Itr->second.first;
1443 }
1444
1445 /// Return the vectorization cost for the given instruction \p I and vector
1446 /// width \p VF.
1447 InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1448 assert(VF.isVector() && "Expected VF >=2");
1449 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1450 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1451 "The cost is not calculated");
1452 return WideningDecisions[InstOnVF].second;
1453 }
1454
1455 /// Return true if instruction \p I is an optimizable truncate whose operand
1456 /// is an induction variable. Such a truncate will be removed by adding a new
1457 /// induction variable with the destination type.
1458 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1459 // If the instruction is not a truncate, return false.
1460 auto *Trunc = dyn_cast<TruncInst>(I);
1461 if (!Trunc)
1462 return false;
1463
1464 // Get the source and destination types of the truncate.
1465 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1466 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1467
1468 // If the truncate is free for the given types, return false. Replacing a
1469 // free truncate with an induction variable would add an induction variable
1470 // update instruction to each iteration of the loop. We exclude from this
1471 // check the primary induction variable since it will need an update
1472 // instruction regardless.
1473 Value *Op = Trunc->getOperand(0);
1474 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1475 return false;
1476
1477 // If the truncated value is not an induction variable, return false.
1478 return Legal->isInductionPhi(Op);
1479 }
1480
1481 /// Collects the instructions to scalarize for each predicated instruction in
1482 /// the loop.
1483 void collectInstsToScalarize(ElementCount VF);
1484
1485 /// Collect Uniform and Scalar values for the given \p VF.
1486 /// The sets depend on CM decision for Load/Store instructions
1487 /// that may be vectorized as interleave, gather-scatter or scalarized.
1488 void collectUniformsAndScalars(ElementCount VF) {
1489 // Do the analysis once.
1490 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1491 return;
1492 setCostBasedWideningDecision(VF);
1493 collectLoopUniforms(VF);
1494 collectLoopScalars(VF);
1495 }
1496
1497 /// Returns true if the target machine supports a masked store operation
1498 /// for the given \p DataType and kind of access to \p Ptr.
1499 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1500 return Legal->isConsecutivePtr(DataType, Ptr) &&
1501 TTI.isLegalMaskedStore(DataType, Alignment);
1502 }
1503
1504 /// Returns true if the target machine supports a masked load operation
1505 /// for the given \p DataType and kind of access to \p Ptr.
1506 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1507 return Legal->isConsecutivePtr(DataType, Ptr) &&
1508 TTI.isLegalMaskedLoad(DataType, Alignment);
1509 }
1510
1511 /// Returns true if the target machine can represent \p V as a masked gather
1512 /// or scatter operation.
1513 bool isLegalGatherOrScatter(Value *V,
1514 ElementCount VF = ElementCount::getFixed(1)) {
1515 bool LI = isa<LoadInst>(V);
1516 bool SI = isa<StoreInst>(V);
1517 if (!LI && !SI)
1518 return false;
1519 auto *Ty = getLoadStoreType(V);
1520 Align Align = getLoadStoreAlignment(V);
1521 if (VF.isVector())
1522 Ty = VectorType::get(Ty, VF);
1523 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1524 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1525 }
1526
1527 /// Returns true if the target machine supports all of the reduction
1528 /// variables found for the given VF.
1529 bool canVectorizeReductions(ElementCount VF) const {
1530 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1531 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1532 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1533 }));
1534 }
1535
1536 /// Returns true if \p I is an instruction that will be scalarized with
1537 /// predication when vectorizing \p I with vectorization factor \p VF. Such
1538 /// instructions include conditional stores and instructions that may divide
1539 /// by zero.
1540 bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1541
1542 /// Returns true if \p I is an instruction that will be predicated either
1543 /// through scalar predication or masked load/store or masked gather/scatter.
1544 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1545 /// Superset of instructions that return true for isScalarWithPredication.
1546 bool isPredicatedInst(Instruction *I, ElementCount VF,
1547 bool IsKnownUniform = false) {
1548 // When we know the load is uniform and the original scalar loop was not
1549 // predicated, we don't need to mark it as a predicated instruction. Any
1550 // vectorized blocks created when tail-folding are artificial blocks we
1551 // have introduced, and we know there is always at least one active lane.
1552 // That's why we call Legal->blockNeedsPredication here: it doesn't
1553 // query tail-folding.
1554 if (IsKnownUniform && isa<LoadInst>(I) &&
1555 !Legal->blockNeedsPredication(I->getParent()))
1556 return false;
1557 if (!blockNeedsPredicationForAnyReason(I->getParent()))
1558 return false;
1559 // Loads and stores that need some form of masked operation are predicated
1560 // instructions.
1561 if (isa<LoadInst>(I) || isa<StoreInst>(I))
1562 return Legal->isMaskRequired(I);
1563 return isScalarWithPredication(I, VF);
1564 }
1565
1566 /// Returns true if \p I is a memory instruction with consecutive memory
1567 /// access that can be widened.
1568 bool
1569 memoryInstructionCanBeWidened(Instruction *I,
1570 ElementCount VF = ElementCount::getFixed(1));
1571
1572 /// Returns true if \p I is a memory instruction in an interleaved-group
1573 /// of memory accesses that can be vectorized with wide vector loads/stores
1574 /// and shuffles.
1575 bool
1576 interleavedAccessCanBeWidened(Instruction *I,
1577 ElementCount VF = ElementCount::getFixed(1));
1578
1579 /// Check if \p Instr belongs to any interleaved access group.
1580 bool isAccessInterleaved(Instruction *Instr) {
1581 return InterleaveInfo.isInterleaved(Instr);
1582 }
1583
1584 /// Get the interleaved access group that \p Instr belongs to.
1585 const InterleaveGroup<Instruction> *
1586 getInterleavedAccessGroup(Instruction *Instr) {
1587 return InterleaveInfo.getInterleaveGroup(Instr);
1588 }
1589
1590 /// Returns true if we're required to use a scalar epilogue for at least
1591 /// the final iteration of the original loop.
1592 bool requiresScalarEpilogue(ElementCount VF) const {
1593 if (!isScalarEpilogueAllowed())
1594 return false;
1595 // If we might exit from anywhere but the latch, must run the exiting
1596 // iteration in scalar form.
1597 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1598 return true;
1599 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1600 }
1601
1602 /// Returns true if a scalar epilogue is allowed, i.e. it was not disallowed
1603 /// due to optsize or a loop hint annotation.
1604 bool isScalarEpilogueAllowed() const {
1605 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1606 }
1607
1608 /// Returns true if all loop blocks should be masked in order to fold the loop tail.
1609 bool foldTailByMasking() const { return FoldTailByMasking; }
1610
1611 /// Returns true if the instructions in this block require predication
1612 /// for any reason, e.g. because tail folding now requires a predicate
1613 /// or because the block in the original loop was predicated.
1614 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1615 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1616 }
1617
1618 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1619 /// nodes to the chain of instructions representing the reductions. Uses a
1620 /// MapVector to ensure deterministic iteration order.
1621 using ReductionChainMap =
1622 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1623
1624 /// Return the chain of instructions representing an inloop reduction.
1625 const ReductionChainMap &getInLoopReductionChains() const {
1626 return InLoopReductionChains;
1627 }
1628
1629 /// Returns true if the Phi is part of an inloop reduction.
1630 bool isInLoopReduction(PHINode *Phi) const {
1631 return InLoopReductionChains.count(Phi);
1632 }
1633
1634 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1635 /// with factor VF. Return the cost of the instruction, including
1636 /// scalarization overhead if it's needed.
1637 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1638
1639 /// Estimate cost of a call instruction CI if it were vectorized with factor
1640 /// VF. Return the cost of the instruction, including scalarization overhead
1641 /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1642 /// scalarized -
1643 /// i.e. either a vector version isn't available, or it is too expensive.
1644 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1645 bool &NeedToScalarize) const;
1646
1647 /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1648 /// that of B.
1649 bool isMoreProfitable(const VectorizationFactor &A,
1650 const VectorizationFactor &B) const;
1651
1652 /// Invalidates decisions already taken by the cost model.
1653 void invalidateCostModelingDecisions() {
1654 WideningDecisions.clear();
1655 Uniforms.clear();
1656 Scalars.clear();
1657 }
1658
1659 private:
1660 unsigned NumPredStores = 0;
1661
1662 /// Convenience function that returns the value of vscale_range iff
1663 /// vscale_range.min == vscale_range.max, or otherwise returns the value
1664 /// returned by the corresponding TTI method.
1665 Optional<unsigned> getVScaleForTuning() const;
1666
1667 /// \return An upper bound for the vectorization factors for both
1668 /// fixed and scalable vectorization, where the minimum-known number of
1669 /// elements is a power-of-2 larger than zero.
If scalable vectorization is
1670 /// disabled or unsupported, then the scalable part will be equal to
1671 /// ElementCount::getScalable(0).
1672 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1673 ElementCount UserVF,
1674 bool FoldTailByMasking);
1675
1676 /// \return the maximized element count based on the target's vector
1677 /// registers and the loop trip-count, but limited to a maximum safe VF.
1678 /// This is a helper function of computeFeasibleMaxVF.
1679 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1680 /// issue that occurred on one of the buildbots which cannot be reproduced
1681 /// without having access to the proprietary compiler (see comments on
1682 /// D98509). The issue is currently under investigation and this workaround
1683 /// will be removed as soon as possible.
1684 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1685 unsigned SmallestType,
1686 unsigned WidestType,
1687 const ElementCount &MaxSafeVF,
1688 bool FoldTailByMasking);
1689
1690 /// \return the maximum legal scalable VF, based on the safe max number
1691 /// of elements.
1692 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1693
1694 /// The vectorization cost is a combination of the cost itself and a boolean
1695 /// indicating whether any of the contributing operations will actually
1696 /// operate on vector values after type legalization in the backend. If this
1697 /// latter value is false, then all operations will be scalarized (i.e. no
1698 /// vectorization has actually taken place).
1699 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1700
1701 /// Returns the expected execution cost. The unit of the cost does
1702 /// not matter because we use the 'cost' units to compare different
1703 /// vector widths. The cost that is returned is *not* normalized by
1704 /// the factor width. If \p Invalid is not nullptr, this function
1705 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1706 /// each instruction that has an Invalid cost for the given VF.
1707 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1708 VectorizationCostTy
1709 expectedCost(ElementCount VF,
1710 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1711
1712 /// Returns the execution time cost of an instruction for a given vector
1713 /// width. Vector width of one means scalar.
1714 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1715
1716 /// The cost-computation logic from getInstructionCost which provides
1717 /// the vector type as an output parameter.
1718 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1719 Type *&VectorTy);
1720
1721 /// Return the cost of instructions in an inloop reduction pattern, if I is
1722 /// part of that pattern.
1723 Optional<InstructionCost>
1724 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1725 TTI::TargetCostKind CostKind);
1726
1727 /// Calculate vectorization cost of memory instruction \p I.
1728 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1729
1730 /// The cost computation for scalarized memory instruction.
1731 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1732
1733 /// The cost computation for interleaving group of memory instructions.
1734 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1735
1736 /// The cost computation for Gather/Scatter instruction.
1737 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1738
1739 /// The cost computation for widening instruction \p I with consecutive
1740 /// memory access.
1741 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1742
1743 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1744 /// Load: scalar load + broadcast.
1745 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1746 /// element)
1747 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1748
1749 /// Estimate the overhead of scalarizing an instruction. This is a
1750 /// convenience wrapper for the type-based getScalarizationOverhead API.
1751 InstructionCost getScalarizationOverhead(Instruction *I,
1752 ElementCount VF) const;
1753
1754 /// Returns whether the instruction is a load or store and will be emitted
1755 /// as a vector operation.
1756 bool isConsecutiveLoadOrStore(Instruction *I);
1757
1758 /// Returns true if an artificially high cost for emulated masked memrefs
1759 /// should be used.
1760 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1761
1762 /// Map of scalar integer values to the smallest bitwidth they can be legally
1763 /// represented as. The vector equivalents of these values should be truncated
1764 /// to this type.
1765 MapVector<Instruction *, uint64_t> MinBWs;
1766
1767 /// A type representing the costs for instructions if they were to be
1768 /// scalarized rather than vectorized. The entries are Instruction-Cost
1769 /// pairs.
1770 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1771
1772 /// A set containing all BasicBlocks that are known to be present after
1773 /// vectorization as predicated blocks.
1774 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1775
1776 /// Records whether it is allowed to have the original scalar loop execute at
1777 /// least once. This may be needed as a fallback loop in case runtime
1778 /// aliasing/dependence checks fail, or to handle the tail/remainder
1779 /// iterations when the trip count is unknown or is not a multiple of the VF,
1780 /// or as a peel-loop to handle gaps in interleave-groups.
1781 /// Under optsize and when the trip count is very small we don't allow any
1782 /// iterations to execute in the scalar loop.
1783 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1784
1785 /// All blocks of the loop are to be masked to fold the tail of the scalar iterations.
1786 bool FoldTailByMasking = false;
1787
1788 /// A map holding scalar costs for different vectorization factors. The
1789 /// presence of a cost for an instruction in the mapping indicates that the
1790 /// instruction will be scalarized when vectorizing with the associated
1791 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1792 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1793
1794 /// Holds the instructions known to be uniform after vectorization.
1795 /// The data is collected per VF.
1796 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1797
1798 /// Holds the instructions known to be scalar after vectorization.
1799 /// The data is collected per VF.
1800 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1801
1802 /// Holds the instructions (address computations) that are forced to be
1803 /// scalarized.
1804 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1805
1806 /// PHINodes of the reductions that should be expanded in-loop along with
1807 /// their associated chains of reduction operations, in program order from top
1808 /// (PHI) to bottom.
1809 ReductionChainMap InLoopReductionChains;
1810
1811 /// A map of inloop reduction operations and their immediate chain operand.
1812 /// FIXME: This can be removed once reductions can be costed correctly in
1813 /// vplan. This was added to allow quick lookup of the inloop operations,
1814 /// without having to loop through InLoopReductionChains.
1815 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1816
1817 /// Returns the expected difference in cost from scalarizing the expression
1818 /// feeding a predicated instruction \p PredInst. The instructions to
1819 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1820 /// non-negative return value implies the expression will be scalarized.
1821 /// Currently, only single-use chains are considered for scalarization.
1822 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1823 ElementCount VF);
1824
1825 /// Collect the instructions that are uniform after vectorization. An
1826 /// instruction is uniform if we represent it with a single scalar value in
1827 /// the vectorized loop corresponding to each vector iteration. Examples of
1828 /// uniform instructions include pointer operands of consecutive or
1829 /// interleaved memory accesses. Note that although uniformity implies an
1830 /// instruction will be scalar, the reverse is not true. In general, a
1831 /// scalarized instruction will be represented by VF scalar values in the
1832 /// vectorized loop, each corresponding to an iteration of the original
1833 /// scalar loop.
1834 void collectLoopUniforms(ElementCount VF);
1835
1836 /// Collect the instructions that are scalar after vectorization. An
1837 /// instruction is scalar if it is known to be uniform or will be scalarized
1838 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1839 /// to the list if they are used by a load/store instruction that is marked as
1840 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1841 /// VF values in the vectorized loop, each corresponding to an iteration of
1842 /// the original scalar loop.
1843 void collectLoopScalars(ElementCount VF);
1844
1845 /// Keeps cost model vectorization decisions and costs for instructions.
1846 /// Right now it is used for memory instructions only.
1847 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1848 std::pair<InstWidening, InstructionCost>>;
1849
1850 DecisionList WideningDecisions;
1851
1852 /// Returns true if \p V is expected to be vectorized and it needs to be
1853 /// extracted.
1854 bool needsExtract(Value *V, ElementCount VF) const {
1855 Instruction *I = dyn_cast<Instruction>(V);
1856 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1857 TheLoop->isLoopInvariant(I))
1858 return false;
1859
1860 // Assume we can vectorize V (and hence we need extraction) if the
1861 // scalars are not computed yet. This can happen, because it is called
1862 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1863 // the scalars are collected. That should be a safe assumption in most
1864 // cases, because we check if the operands have vectorizable types
1865 // beforehand in LoopVectorizationLegality.
1866 return Scalars.find(VF) == Scalars.end() ||
1867 !isScalarAfterVectorization(I, VF);
1868 }
1869
1870 /// Returns a range containing only operands needing to be extracted.
1871 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1872 ElementCount VF) const {
1873 return SmallVector<Value *, 4>(make_filter_range(
1874 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1875 }
1876
1877 /// Determines if we have the infrastructure to vectorize loop \p L and its
1878 /// epilogue, assuming the main loop is vectorized by \p VF.
1879 bool isCandidateForEpilogueVectorization(const Loop &L,
1880 const ElementCount VF) const;
1881
1882 /// Returns true if epilogue vectorization is considered profitable, and
1883 /// false otherwise.
1884 /// \p VF is the vectorization factor chosen for the original loop.
1885 bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1886
1887 public:
1888 /// The loop that we evaluate.
1889 Loop *TheLoop;
1890
1891 /// Predicated scalar evolution analysis.
1892 PredicatedScalarEvolution &PSE;
1893
1894 /// Loop Info analysis.
1895 LoopInfo *LI;
1896
1897 /// Vectorization legality.
1898 LoopVectorizationLegality *Legal;
1899
1900 /// Vector target information.
1901 const TargetTransformInfo &TTI;
1902
1903 /// Target Library Info.
1904 const TargetLibraryInfo *TLI;
1905
1906 /// Demanded bits analysis.
1907 DemandedBits *DB;
1908
1909 /// Assumption cache.
1910 AssumptionCache *AC;
1911
1912 /// Interface to emit optimization remarks.
1913 OptimizationRemarkEmitter *ORE;
1914
1915 const Function *TheFunction;
1916
1917 /// Loop Vectorize Hint.
1918 const LoopVectorizeHints *Hints;
1919
1920 /// The interleave access information contains groups of interleaved accesses
1921 /// with the same stride that are close to each other.
1922 InterleavedAccessInfo &InterleaveInfo;
1923
1924 /// Values to ignore in the cost model.
1925 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1926
1927 /// Values to ignore in the cost model when VF > 1.
1928 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1929
1930 /// All element types found in the loop.
1931 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1932
1933 /// Profitable vector factors.
1934 SmallVector<VectorizationFactor, 8> ProfitableVFs;
1935 };
1936 } // end namespace llvm
1937
1938 /// Helper struct to manage generating runtime checks for vectorization.
1939 ///
1940 /// The runtime checks are created up-front in temporary blocks, un-linked from
1941 /// the existing IR, to allow their cost to be estimated more accurately. After
1942 /// deciding to vectorize, the check blocks are moved back. If the decision is
1943 /// not to vectorize, the temporary blocks are removed completely.
1944 class GeneratedRTChecks {
1945 /// Basic block which contains the generated SCEV checks, if any.
1946 BasicBlock *SCEVCheckBlock = nullptr;
1947
1948 /// The value representing the result of the generated SCEV checks. If it is
1949 /// nullptr, either no SCEV checks have been generated or they have been used.
1950 Value *SCEVCheckCond = nullptr;
1951
1952 /// Basic block which contains the generated memory runtime checks, if any.
1953 BasicBlock *MemCheckBlock = nullptr;
1954
1955 /// The value representing the result of the generated memory runtime checks.
1956 /// If it is nullptr, either no memory runtime checks have been generated or
1957 /// they have been used.
1958 Value *MemRuntimeCheckCond = nullptr;
1959
1960 DominatorTree *DT;
1961 LoopInfo *LI;
1962
1963 SCEVExpander SCEVExp;
1964 SCEVExpander MemCheckExp;
1965
1966 public:
1967 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1968 const DataLayout &DL)
1969 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1970 MemCheckExp(SE, DL, "scev.check") {}
1971
1972 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1973 /// accurately estimate the cost of the runtime checks. The blocks are
1974 /// un-linked from the IR and are added back during vector code generation. If
1975 /// there is no vector code generation, the check blocks are removed
1976 /// completely.
1977 void Create(Loop *L, const LoopAccessInfo &LAI,
1978 const SCEVPredicate &Pred) {
1979
1980 BasicBlock *LoopHeader = L->getHeader();
1981 BasicBlock *Preheader = L->getLoopPreheader();
1982
1983 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1984 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1985 // may be used by SCEVExpander. The blocks will be un-linked from their
1986 // predecessors and removed from LI & DT at the end of the function.
1987 if (!Pred.isAlwaysTrue()) {
1988 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1989 nullptr, "vector.scevcheck");
1990
1991 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1992 &Pred, SCEVCheckBlock->getTerminator());
1993 }
1994
1995 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1996 if (RtPtrChecking.Need) {
1997 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1998 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1999 "vector.memcheck");
2000
2001 MemRuntimeCheckCond =
2002 addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2003 RtPtrChecking.getChecks(), MemCheckExp);
2004 assert(MemRuntimeCheckCond &&
2005 "no RT checks generated although RtPtrChecking "
2006 "claimed checks are required");
2007 }
2008
2009 if (!MemCheckBlock && !SCEVCheckBlock)
2010 return;
2011
2012 // Unhook the temporary blocks with the checks and update various places
2013 // accordingly.
2014 if (SCEVCheckBlock)
2015 SCEVCheckBlock->replaceAllUsesWith(Preheader);
2016 if (MemCheckBlock)
2017 MemCheckBlock->replaceAllUsesWith(Preheader);
2018
2019 if (SCEVCheckBlock) {
2020 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2021 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2022 Preheader->getTerminator()->eraseFromParent();
2023 }
2024 if (MemCheckBlock) {
2025 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2026 new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2027 Preheader->getTerminator()->eraseFromParent();
2028 }
2029
2030 DT->changeImmediateDominator(LoopHeader, Preheader);
2031 if (MemCheckBlock) {
2032 DT->eraseNode(MemCheckBlock);
2033 LI->removeBlock(MemCheckBlock);
2034 }
2035 if (SCEVCheckBlock) {
2036 DT->eraseNode(SCEVCheckBlock);
2037 LI->removeBlock(SCEVCheckBlock);
2038 }
2039 }
2040
2041 /// Remove the created SCEV & memory runtime check blocks & instructions, if
2042 /// unused.
2043 ~GeneratedRTChecks() {
2044 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2045 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2046 if (!SCEVCheckCond)
2047 SCEVCleaner.markResultUsed();
2048
2049 if (!MemRuntimeCheckCond)
2050 MemCheckCleaner.markResultUsed();
2051
2052 if (MemRuntimeCheckCond) {
2053 auto &SE = *MemCheckExp.getSE();
2054 // Memory runtime check generation creates compares that use expanded
2055 // values. Remove them before running the SCEVExpanderCleaners.
2056 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2057 if (MemCheckExp.isInsertedInstruction(&I))
2058 continue;
2059 SE.forgetValue(&I);
2060 I.eraseFromParent();
2061 }
2062 }
2063 MemCheckCleaner.cleanup();
2064 SCEVCleaner.cleanup();
2065
2066 if (SCEVCheckCond)
2067 SCEVCheckBlock->eraseFromParent();
2068 if (MemRuntimeCheckCond)
2069 MemCheckBlock->eraseFromParent();
2070 }
2071
2072 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2073 /// adjusts the branches to branch to the vector preheader or \p Bypass,
2074 /// depending on the generated condition.
2075 BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2076 BasicBlock *LoopVectorPreHeader,
2077 BasicBlock *LoopExitBlock) {
2078 if (!SCEVCheckCond)
2079 return nullptr;
2080 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2081 if (C->isZero())
2082 return nullptr;
2083
2084 auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2085
2086 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2087 // Create new preheader for vector loop.
2088 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2089 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2090
2091 SCEVCheckBlock->getTerminator()->eraseFromParent();
2092 SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2093 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2094 SCEVCheckBlock);
2095
2096 DT->addNewBlock(SCEVCheckBlock, Pred);
2097 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2098
2099 ReplaceInstWithInst(
2100 SCEVCheckBlock->getTerminator(),
2101 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2102 // Mark the check as used, to prevent it from being removed during cleanup.
2103 SCEVCheckCond = nullptr;
2104 return SCEVCheckBlock;
2105 }
2106
2107 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2108 /// the branches to branch to the vector preheader or \p Bypass, depending on
2109 /// the generated condition.
2110 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
2111 BasicBlock *LoopVectorPreHeader) {
2112 // Check if we generated code that checks at runtime whether arrays overlap.
2113 if (!MemRuntimeCheckCond)
2114 return nullptr;
2115
2116 auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2117 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2118 MemCheckBlock);
2119
2120 DT->addNewBlock(MemCheckBlock, Pred);
2121 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2122 MemCheckBlock->moveBefore(LoopVectorPreHeader);
2123
2124 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2125 PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2126
2127 ReplaceInstWithInst(
2128 MemCheckBlock->getTerminator(),
2129 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2130 MemCheckBlock->getTerminator()->setDebugLoc(
2131 Pred->getTerminator()->getDebugLoc());
2132
2133 // Mark the check as used, to prevent it from being removed during cleanup.
2134 MemRuntimeCheckCond = nullptr; 2135 return MemCheckBlock; 2136 } 2137 }; 2138 2139 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2140 // vectorization. The loop needs to be annotated with #pragma omp simd 2141 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2142 // vector length information is not provided, vectorization is not considered 2143 // explicit. Interleave hints are not allowed either. These limitations will be 2144 // relaxed in the future. 2145 // Please, note that we are currently forced to abuse the pragma 'clang 2146 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2147 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2148 // provides *explicit vectorization hints* (LV can bypass legal checks and 2149 // assume that vectorization is legal). However, both hints are implemented 2150 // using the same metadata (llvm.loop.vectorize, processed by 2151 // LoopVectorizeHints). This will be fixed in the future when the native IR 2152 // representation for pragma 'omp simd' is introduced. 2153 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2154 OptimizationRemarkEmitter *ORE) { 2155 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2156 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2157 2158 // Only outer loops with an explicit vectorization hint are supported. 2159 // Unannotated outer loops are ignored. 2160 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2161 return false; 2162 2163 Function *Fn = OuterLp->getHeader()->getParent(); 2164 if (!Hints.allowVectorization(Fn, OuterLp, 2165 true /*VectorizeOnlyWhenForced*/)) { 2166 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2167 return false; 2168 } 2169 2170 if (Hints.getInterleave() > 1) { 2171 // TODO: Interleave support is future work. 2172 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2173 "outer loops.\n"); 2174 Hints.emitRemarkWithHints(); 2175 return false; 2176 } 2177 2178 return true; 2179 } 2180 2181 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2182 OptimizationRemarkEmitter *ORE, 2183 SmallVectorImpl<Loop *> &V) { 2184 // Collect inner loops and outer loops without irreducible control flow. For 2185 // now, only collect outer loops that have explicit vectorization hints. If we 2186 // are stress testing the VPlan H-CFG construction, we collect the outermost 2187 // loop of every loop nest. 2188 if (L.isInnermost() || VPlanBuildStressTest || 2189 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2190 LoopBlocksRPO RPOT(&L); 2191 RPOT.perform(LI); 2192 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2193 V.push_back(&L); 2194 // TODO: Collect inner loops inside marked outer loops in case 2195 // vectorization fails for the outer loop. Do not invoke 2196 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2197 // already known to be reducible. We can use an inherited attribute for 2198 // that. 2199 return; 2200 } 2201 } 2202 for (Loop *InnerL : L) 2203 collectSupportedLoops(*InnerL, LI, ORE, V); 2204 } 2205 2206 namespace { 2207 2208 /// The LoopVectorize Pass. 
2209 struct LoopVectorize : public FunctionPass {
2210 /// Pass identification, replacement for typeid
2211 static char ID;
2212
2213 LoopVectorizePass Impl;
2214
2215 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2216 bool VectorizeOnlyWhenForced = false)
2217 : FunctionPass(ID),
2218 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2219 initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2220 }
2221
2222 bool runOnFunction(Function &F) override {
2223 if (skipFunction(F))
2224 return false;
2225
2226 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2227 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2228 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2229 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2230 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2231 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2232 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2233 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2234 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2235 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2236 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2237 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2238 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2239
2240 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2241 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2242
2243 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2244 GetLAA, *ORE, PSI).MadeAnyChange;
2245 }
2246
2247 void getAnalysisUsage(AnalysisUsage &AU) const override {
2248 AU.addRequired<AssumptionCacheTracker>();
2249 AU.addRequired<BlockFrequencyInfoWrapperPass>();
2250 AU.addRequired<DominatorTreeWrapperPass>();
2251 AU.addRequired<LoopInfoWrapperPass>();
2252 AU.addRequired<ScalarEvolutionWrapperPass>();
2253 AU.addRequired<TargetTransformInfoWrapperPass>();
2254 AU.addRequired<AAResultsWrapperPass>();
2255 AU.addRequired<LoopAccessLegacyAnalysis>();
2256 AU.addRequired<DemandedBitsWrapperPass>();
2257 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2258 AU.addRequired<InjectTLIMappingsLegacy>();
2259
2260 // We currently do not preserve LoopInfo/DominatorTree analyses with outer
2261 // loop vectorization. Until this is addressed, mark these analyses as
2262 // preserved only for the non-VPlan-native path.
2263 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2264 if (!EnableVPlanNativePath) {
2265 AU.addPreserved<LoopInfoWrapperPass>();
2266 AU.addPreserved<DominatorTreeWrapperPass>();
2267 }
2268
2269 AU.addPreserved<BasicAAWrapperPass>();
2270 AU.addPreserved<GlobalsAAWrapperPass>();
2271 AU.addRequired<ProfileSummaryInfoWrapperPass>();
2272 }
2273 };
2274
2275 } // end anonymous namespace
2276
2277 //===----------------------------------------------------------------------===//
2278 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
2279 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2280 //===----------------------------------------------------------------------===//
2281
2282 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2283 // We need to place the broadcast of invariant variables outside the loop,
2284 // but only if it's proven safe to do so. Otherwise, the broadcast will be
2285 // placed inside the vector loop body.
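// For example (a sketch, assuming an i32 operand %x and VF = 4), the splat
// created by CreateVectorSplat below takes the form:
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i64 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer
// and ends up in the preheader whenever hoisting is safe.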
2286 Instruction *Instr = dyn_cast<Instruction>(V);
2287 bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2288 (!Instr ||
2289 DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2290 // Place the code for broadcasting invariant variables in the new preheader.
2291 IRBuilder<>::InsertPointGuard Guard(Builder);
2292 if (SafeToHoist)
2293 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2294
2295 // Broadcast the scalar into all locations in the vector.
2296 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2297
2298 return Shuf;
2299 }
2300
2301 /// This function adds
2302 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2303 /// to each vector element of Val. The sequence starts at StartIdx.
2304 /// \p BinOp is relevant for FP induction variables.
2305 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2306 Instruction::BinaryOps BinOp, ElementCount VF,
2307 IRBuilderBase &Builder) {
2308 assert(VF.isVector() && "only vector VFs are supported");
2309
2310 // Create and check the types.
2311 auto *ValVTy = cast<VectorType>(Val->getType());
2312 ElementCount VLen = ValVTy->getElementCount();
2313
2314 Type *STy = Val->getType()->getScalarType();
2315 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2316 "Induction Step must be an integer or FP");
2317 assert(Step->getType() == STy && "Step has wrong type");
2318
2319 SmallVector<Constant *, 8> Indices;
2320
2321 // Create a vector of consecutive numbers from zero to VF.
2322 VectorType *InitVecValVTy = ValVTy;
2323 if (STy->isFloatingPointTy()) {
2324 Type *InitVecValSTy =
2325 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2326 InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2327 }
2328 Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2329
2330 // Splat the StartIdx.
2331 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2332
2333 if (STy->isIntegerTy()) {
2334 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2335 Step = Builder.CreateVectorSplat(VLen, Step);
2336 assert(Step->getType() == Val->getType() && "Invalid step vec");
2337 // FIXME: The newly created binary instructions should contain nsw/nuw
2338 // flags, which can be found from the original scalar operations.
2339 Step = Builder.CreateMul(InitVec, Step);
2340 return Builder.CreateAdd(Val, Step, "induction");
2341 }
2342
2343 // Floating point induction.
2344 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2345 "Binary Opcode should be specified for FP induction");
2346 InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2347 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2348
2349 Step = Builder.CreateVectorSplat(VLen, Step);
2350 Value *MulOp = Builder.CreateFMul(InitVec, Step);
2351 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2352 }
2353
2354 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2355 /// variable on which to base the steps, \p Step is the size of the step.
2356 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2357 const InductionDescriptor &ID, VPValue *Def,
2358 VPTransformState &State) {
2359 IRBuilderBase &Builder = State.Builder;
2360 // We shouldn't have to build scalar steps if we aren't vectorizing.
2361 assert(State.VF.isVector() && "VF should be greater than one");
2362 // Get the value type and ensure it and the step have the same type.
2363 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2364 assert(ScalarIVTy == Step->getType() && 2365 "Val and Step should have the same type"); 2366 2367 // We build scalar steps for both integer and floating-point induction 2368 // variables. Here, we determine the kind of arithmetic we will perform. 2369 Instruction::BinaryOps AddOp; 2370 Instruction::BinaryOps MulOp; 2371 if (ScalarIVTy->isIntegerTy()) { 2372 AddOp = Instruction::Add; 2373 MulOp = Instruction::Mul; 2374 } else { 2375 AddOp = ID.getInductionOpcode(); 2376 MulOp = Instruction::FMul; 2377 } 2378 2379 // Determine the number of scalars we need to generate for each unroll 2380 // iteration. 2381 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2382 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2383 // Compute the scalar steps and save the results in State. 2384 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2385 ScalarIVTy->getScalarSizeInBits()); 2386 Type *VecIVTy = nullptr; 2387 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2388 if (!FirstLaneOnly && State.VF.isScalable()) { 2389 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2390 UnitStepVec = 2391 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2392 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2393 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2394 } 2395 2396 for (unsigned Part = 0; Part < State.UF; ++Part) { 2397 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2398 2399 if (!FirstLaneOnly && State.VF.isScalable()) { 2400 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2401 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2402 if (ScalarIVTy->isFloatingPointTy()) 2403 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2404 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2405 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2406 State.set(Def, Add, Part); 2407 // It's useful to record the lane values too for the known minimum number 2408 // of elements so we do those below. This improves the code quality when 2409 // trying to extract the first element, for example. 2410 } 2411 2412 if (ScalarIVTy->isFloatingPointTy()) 2413 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2414 2415 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2416 Value *StartIdx = Builder.CreateBinOp( 2417 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2418 // The step returned by `createStepForVF` is a runtime-evaluated value 2419 // when VF is scalable. Otherwise, it should be folded into a Constant. 2420 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2421 "Expected StartIdx to be folded to a constant when VF is not " 2422 "scalable"); 2423 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2424 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2425 State.set(Def, Add, VPIteration(Part, Lane)); 2426 } 2427 } 2428 } 2429 2430 // Generate code for the induction step. 
Note that induction steps are
2431 // required to be loop-invariant.
2432 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2433 Instruction *InsertBefore,
2434 Loop *OrigLoop = nullptr) {
2435 const DataLayout &DL = SE.getDataLayout();
2436 assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2437 "Induction step should be loop invariant");
2438 if (auto *E = dyn_cast<SCEVUnknown>(Step))
2439 return E->getValue();
2440
2441 SCEVExpander Exp(SE, DL, "induction");
2442 return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2443 }
2444
2445 /// Compute the transformed value of Index at offset StartValue using step
2446 /// StepValue.
2447 /// For integer induction, returns StartValue + Index * StepValue.
2448 /// For pointer induction, returns StartValue[Index * StepValue].
2449 /// FIXME: The newly created binary instructions should contain nsw/nuw
2450 /// flags, which can be found from the original scalar operations.
2451 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2452 Value *StartValue, Value *Step,
2453 const InductionDescriptor &ID) {
2454 assert(Index->getType()->getScalarType() == Step->getType() &&
2455 "Index scalar type does not match StepValue type");
2456
2457 // Note: the IR at this point is broken. We cannot use SE to create any new
2458 // SCEV and then expand it, hoping that SCEV's simplification will give us
2459 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2460 // lead to various SCEV crashes. So all we can do is use the builder and rely
2461 // on InstCombine for future simplifications. Here we handle some trivial
2462 // cases only.
2463 auto CreateAdd = [&B](Value *X, Value *Y) {
2464 assert(X->getType() == Y->getType() && "Types don't match!");
2465 if (auto *CX = dyn_cast<ConstantInt>(X))
2466 if (CX->isZero())
2467 return Y;
2468 if (auto *CY = dyn_cast<ConstantInt>(Y))
2469 if (CY->isZero())
2470 return X;
2471 return B.CreateAdd(X, Y);
2472 };
2473
2474 // We allow X to be a vector type, in which case Y will potentially be
2475 // splatted into a vector with the same element count.
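// E.g. multiplying a <4 x i64> index vector by a scalar step of 3 first
// splats the step to <i64 3, i64 3, i64 3, i64 3> and then emits the multiply.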
2476 auto CreateMul = [&B](Value *X, Value *Y) { 2477 assert(X->getType()->getScalarType() == Y->getType() && 2478 "Types don't match!"); 2479 if (auto *CX = dyn_cast<ConstantInt>(X)) 2480 if (CX->isOne()) 2481 return Y; 2482 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2483 if (CY->isOne()) 2484 return X; 2485 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2486 if (XVTy && !isa<VectorType>(Y->getType())) 2487 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2488 return B.CreateMul(X, Y); 2489 }; 2490 2491 switch (ID.getKind()) { 2492 case InductionDescriptor::IK_IntInduction: { 2493 assert(!isa<VectorType>(Index->getType()) && 2494 "Vector indices not supported for integer inductions yet"); 2495 assert(Index->getType() == StartValue->getType() && 2496 "Index type does not match StartValue type"); 2497 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2498 return B.CreateSub(StartValue, Index); 2499 auto *Offset = CreateMul(Index, Step); 2500 return CreateAdd(StartValue, Offset); 2501 } 2502 case InductionDescriptor::IK_PtrInduction: { 2503 assert(isa<Constant>(Step) && 2504 "Expected constant step for pointer induction"); 2505 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2506 } 2507 case InductionDescriptor::IK_FpInduction: { 2508 assert(!isa<VectorType>(Index->getType()) && 2509 "Vector indices not supported for FP inductions yet"); 2510 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2511 auto InductionBinOp = ID.getInductionBinOp(); 2512 assert(InductionBinOp && 2513 (InductionBinOp->getOpcode() == Instruction::FAdd || 2514 InductionBinOp->getOpcode() == Instruction::FSub) && 2515 "Original bin op should be defined for FP induction"); 2516 2517 Value *MulExp = B.CreateFMul(Step, Index); 2518 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2519 "induction"); 2520 } 2521 case InductionDescriptor::IK_NoInduction: 2522 return nullptr; 2523 } 2524 llvm_unreachable("invalid enum"); 2525 } 2526 2527 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2528 const VPIteration &Instance, 2529 VPTransformState &State) { 2530 Value *ScalarInst = State.get(Def, Instance); 2531 Value *VectorValue = State.get(Def, Instance.Part); 2532 VectorValue = Builder.CreateInsertElement( 2533 VectorValue, ScalarInst, 2534 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2535 State.set(Def, VectorValue, Instance.Part); 2536 } 2537 2538 // Return whether we allow using masked interleave-groups (for dealing with 2539 // strided loads/stores that reside in predicated blocks, or for dealing 2540 // with gaps). 2541 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2542 // If an override option has been passed in for interleaved accesses, use it. 2543 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2544 return EnableMaskedInterleavedMemAccesses; 2545 2546 return TTI.enableMaskedInterleavedAccessVectorization(); 2547 } 2548 2549 // Try to vectorize the interleave group that \p Instr belongs to. 2550 // 2551 // E.g. Translate following interleaved load group (factor = 3): 2552 // for (i = 0; i < N; i+=3) { 2553 // R = Pic[i]; // Member of index 0 2554 // G = Pic[i+1]; // Member of index 1 2555 // B = Pic[i+2]; // Member of index 2 2556 // ... 
// do something to R, G, B
2557 // }
2558 // To:
2559 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2560 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
2561 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
2562 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
2563 //
2564 // Or translate the following interleaved store group (factor = 3):
2565 // for (i = 0; i < N; i+=3) {
2566 // ... do something to R, G, B
2567 // Pic[i] = R; // Member of index 0
2568 // Pic[i+1] = G; // Member of index 1
2569 // Pic[i+2] = B; // Member of index 2
2570 // }
2571 // To:
2572 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2573 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2574 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2575 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2576 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2577 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2578 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2579 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2580 VPValue *BlockInMask) {
2581 Instruction *Instr = Group->getInsertPos();
2582 const DataLayout &DL = Instr->getModule()->getDataLayout();
2583
2584 // Prepare for the vector type of the interleaved load/store.
2585 Type *ScalarTy = getLoadStoreType(Instr);
2586 unsigned InterleaveFactor = Group->getFactor();
2587 assert(!VF.isScalable() && "scalable vectors not yet supported.");
2588 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2589
2590 // Prepare for the new pointers.
2591 SmallVector<Value *, 2> AddrParts;
2592 unsigned Index = Group->getIndex(Instr);
2593
2594 // TODO: extend the masked interleaved-group support to reversed access.
2595 assert((!BlockInMask || !Group->isReverse()) &&
2596 "Reversed masked interleave-group not supported.");
2597
2598 // If the group is reverse, adjust the index to refer to the last vector lane
2599 // instead of the first. We adjust the index from the first vector lane,
2600 // rather than directly getting the pointer for lane VF - 1, because the
2601 // pointer operand of the interleaved access is supposed to be uniform. For
2602 // uniform instructions, we're only required to generate a value for the
2603 // first vector lane in each unroll iteration.
2604 if (Group->isReverse())
2605 Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2606
2607 for (unsigned Part = 0; Part < UF; Part++) {
2608 Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2609 setDebugLocFromInst(AddrPart);
2610
2611 // Note that the current instruction could be at any index in the group;
2612 // the address needs to be adjusted to the member of index 0.
2613 //
2614 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2615 // b = A[i]; // Member of index 0
2616 // The current pointer points to A[i+1]; adjust it to A[i].
2617 //
2618 // E.g. A[i+1] = a; // Member of index 1
2619 // A[i] = b; // Member of index 0
2620 // A[i+2] = c; // Member of index 2 (Current instruction)
2621 // The current pointer points to A[i+2]; adjust it to A[i].
2622
2623 bool InBounds = false;
2624 if (auto *GEP = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2625 InBounds = GEP->isInBounds();
2626 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2627 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2628
2629 // Cast to the vector pointer type.
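// (For instance, with VF = 4 and an interleave factor of 3, an i32 pointer
// is cast to <12 x i32>* so that a single wide access covers the whole
// group.)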
2630 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2631 Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2632 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2633 }
2634
2635 setDebugLocFromInst(Instr);
2636 Value *PoisonVec = PoisonValue::get(VecTy);
2637
2638 Value *MaskForGaps = nullptr;
2639 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2640 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2641 assert(MaskForGaps && "Mask for Gaps is required but it is null");
2642 }
2643
2644 // Vectorize the interleaved load group.
2645 if (isa<LoadInst>(Instr)) {
2646 // For each unroll part, create a wide load for the group.
2647 SmallVector<Value *, 2> NewLoads;
2648 for (unsigned Part = 0; Part < UF; Part++) {
2649 Instruction *NewLoad;
2650 if (BlockInMask || MaskForGaps) {
2651 assert(useMaskedInterleavedAccesses(*TTI) &&
2652 "masked interleaved groups are not allowed.");
2653 Value *GroupMask = MaskForGaps;
2654 if (BlockInMask) {
2655 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2656 Value *ShuffledMask = Builder.CreateShuffleVector(
2657 BlockInMaskPart,
2658 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2659 "interleaved.mask");
2660 GroupMask = MaskForGaps
2661 ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2662 MaskForGaps)
2663 : ShuffledMask;
2664 }
2665 NewLoad =
2666 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2667 GroupMask, PoisonVec, "wide.masked.vec");
2668 }
2669 else
2670 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2671 Group->getAlign(), "wide.vec");
2672 Group->addMetadata(NewLoad);
2673 NewLoads.push_back(NewLoad);
2674 }
2675
2676 // For each member in the group, shuffle out the appropriate data from the
2677 // wide loads.
2678 unsigned J = 0;
2679 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2680 Instruction *Member = Group->getMember(I);
2681
2682 // Skip the gaps in the group.
2683 if (!Member)
2684 continue;
2685
2686 auto StrideMask =
2687 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2688 for (unsigned Part = 0; Part < UF; Part++) {
2689 Value *StridedVec = Builder.CreateShuffleVector(
2690 NewLoads[Part], StrideMask, "strided.vec");
2691
2692 // If this member has a different type, cast the result to that type.
2693 if (Member->getType() != ScalarTy) {
2694 assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
2695 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2696 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2697 }
2698
2699 if (Group->isReverse())
2700 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2701
2702 State.set(VPDefs[J], StridedVec, Part);
2703 }
2704 ++J;
2705 }
2706 return;
2707 }
2708
2709 // The subvector type for the current instruction.
2710 auto *SubVT = VectorType::get(ScalarTy, VF);
2711
2712 // Vectorize the interleaved store group.
2713 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2714 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2715 "masked interleaved groups are not allowed.");
2716 assert((!MaskForGaps || !VF.isScalable()) &&
2717 "masking gaps for scalable vectors is not yet supported.");
2718 for (unsigned Part = 0; Part < UF; Part++) {
2719 // Collect the stored vector from each member.
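// E.g. for factor 3, this gathers the R, G and B subvectors in member order,
// substituting a poison <VF x ty> subvector for any gap in the group.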
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Fail to get a member from an interleaved store group");
      Instruction *Member = Group->getMember(i);

      // Skip the gaps in the group.
      if (!Member) {
        Value *Undef = PoisonValue::get(SubVT);
        StoredVecs.push_back(Undef);
        continue;
      }

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask || MaskForGaps) {
      Value *GroupMask = MaskForGaps;
      if (BlockInMask) {
        Value *BlockInMaskPart = State.get(BlockInMask, Part);
        Value *ShuffledMask = Builder.CreateShuffleVector(
            BlockInMaskPart,
            createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
            "interleaved.mask");
        GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
                                                      ShuffledMask, MaskForGaps)
                                : ShuffledMask;
      }
      NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
                                                Group->getAlign(), GroupMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               VPReplicateRecipe *RepRecipe,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
  if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
    Cloned->dropPoisonGeneratingFlags();

  State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
                               Builder.GetInsertPoint());
  // Replace the operands of the cloned instructions with their scalar
  // equivalents in the new loop.
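  // (Sketch: when replicating, e.g., %d = sdiv i32 %a, %b with VF = 4 and
  // UF = 1, one clone is created per lane, each taking that lane's scalar
  // operands; a uniform operand instead reuses the single value generated
  // for the first lane, which is why InputInstance.Lane is reset below.)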
  for (auto &I : enumerate(RepRecipe->operands())) {
    auto InputInstance = Instance;
    VPValue *Operand = I.value();
    VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
    if (OperandR && OperandR->isUniform())
      InputInstance.Lane = VPLane::getFirstLane();
    Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  State.set(RepRecipe, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<AssumeInst>(Cloned))
    AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  assert(!L->getLoopLatch() && "loop should not have a latch at this point");

  IRBuilder<> B(Header->getTerminator());
  Instruction *OldInst =
      getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  setDebugLocFromInst(OldInst, &B);

  // Connect the header to the exit and header blocks and replace the old
  // terminator.
  B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Header->getTerminator()->eraseFromParent();
}

Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
  if (TripCount)
    return TripCount;

  assert(InsertBlock);
  IRBuilder<> Builder(InsertBlock->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count is if the induction
  // variable was signed and as such will not overflow. In such a case
  // truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the backedge-taken count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = InsertBlock->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
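  // (For example, if the backedge-taken count is UINT_MAX, adding one above
  // wraps to an overall count of zero; the minimum-iteration check emitted
  // later also guards this rare case by jumping to the scalar loop.)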
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                InsertBlock->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    InsertBlock->getTerminator());

  return TripCount;
}

Value *
InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(InsertBlock);
  IRBuilder<> Builder(InsertBlock->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  Value *Step = createStepForVF(Builder, Ty, VF, UF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's ok if this addition
  // overflows: the vector induction variable will eventually wrap to zero
  // given that it starts at zero and its Step is a power of two; the loop
  // will then exit, with the last early-exit vector comparison also producing
  // all-true.
  if (Cost->foldTailByMasking()) {
    assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
           "VF*UF must be a power of 2 when folding tail by masking");
    Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
    TC = Builder.CreateAdd(
        TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
  }

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step, otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // There are cases where we *must* run at least one iteration in the
  // remainder loop. See the cost model for when this can happen. If the step
  // evenly divides the trip count, we set the remainder to be equal to the
  // step. If the step does not evenly divide the trip count, no adjustment is
  // necessary since there will already be scalar iterations. Note that the
  // minimum iterations check ensures that N >= Step.
  if (Cost->requiresScalarEpilogue(VF)) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                                   const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

  // Do a direct cast if element types are castable.
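  // (E.g., <4 x i32> -> <4 x float> is a single bitcast; the two-step path
  // below is only needed when exactly one side is a vector of pointers.)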
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type.
  // This may happen when V is a floating point vector but DstVTy is a vector
  // of pointers or vice-versa. Handle this using a two-step bitcast with an
  // intermediate integer type, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
                                            : ICmpInst::ICMP_ULT;

  // If the tail is to be folded, the vector loop takes care of all iterations.
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  }
  // Create a new preheader for the vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update the dominator for Bypass & LoopExit (if needed).
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
      RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
  if (!SCEVCheckBlock)
    return nullptr;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  // Update the dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    if (!Cost->requiresScalarEpilogue(VF))
      // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
      // dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
  return SCEVCheckBlock;
}

BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
  // The VPlan-native path does not do any analysis for runtime checks
  // currently.
  if (EnableVPlanNativePath)
    return nullptr;

  BasicBlock *const MemCheckBlock =
      RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);

  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        OrigLoop->getStartLoc(),
                                        OrigLoop->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
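  // (Sketch of the effect: this prepares !alias.scope/!noalias scopes so
  // that instructions copied into the vector loop can later be annotated
  // (cf. addNewMetadata above), letting later passes assume the arrays
  // proven disjoint by MemCheckBlock do not overlap.)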
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the
  //    remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF)
                           ? BranchInst::Create(LoopScalarPreHeader)
                           : BranchInst::Create(LoopExitBlock,
                                                LoopScalarPreHeader,
                                                Builder.getTrue());
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines
  // later.
  BasicBlock *LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update the dominator for the loop exit.
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}

void InnerLoopVectorizer::createInductionResumeValues(
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");

  Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
  assert(VectorTripCount && "Expected valid arguments");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
  Instruction *OldInduction = Legal->getPrimaryInduction();
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy original phi DL over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(LoopVectorPreHeader->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
      Value *Step =
          CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
      EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
      EndValue->setName("ind.end");

      // Compute the end value for the additional bypass (if applicable).
      if (AdditionalBypass.first) {
        B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
        CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
                                         StepType, true);
        Value *Step =
            CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
        CRD =
            B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
        EndValueFromAdditionalBypass =
            emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
        EndValueFromAdditionalBypass->setName("ind.end");
      }
    }
    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);

    // Fix the scalar body counter (PHI node).
    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);

    if (AdditionalBypass.first)
      BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
                                            EndValueFromAdditionalBypass);

    OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  }
}

BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) {
  // The trip counts should be cached by now.
  Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop. Three cases:
  // 1) If we require a scalar epilogue, there is no conditional branch as
  //    we unconditionally branch to the scalar preheader. Do nothing.
  // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
  //    Thus if the tail is to be folded, we know we don't need to run the
  //    remainder and we can use the previous value for the condition (true).
  // 3) Otherwise, construct a runtime check.
  if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
    Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                        Count, VectorTripCount, "cmp.n",
                                        LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch terminator
    // instead of the corresponding compare because they may have ended up
    // with different line numbers and we want to avoid awkward line stepping
    // while debugging. E.g., if the compare has a line number inside the
    // loop.
    CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
    cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
  }

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  LI->verify(*DT);
#endif

  return LoopVectorPreHeader;
}

std::pair<BasicBlock *, Value *>
InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |     v
  \   -[ ]   <--- middle-block.
   \/   |
   /\   v
  | ->[ ]     <--- new preheader.
  |    |
 (opt)  v      <-- edge from middle to exit iff epilogue is not required.
  |   [ ] \
  |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
   \   |
    \  v
     >[ ]     <-- exit block(s).
   ...
   */

  // Get the metadata of the original loop before it gets modified.
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  // Workaround! Compute the trip count of the original loop and cache it
  // before we start modifying the CFG. This code has a systemic problem
  // wherein it tries to run analysis over partially constructed IR; this is
  // wrong, and not simply for SCEV. The trip count of the original loop
  // simply happens to be prone to hitting this in practice.
  // In theory, we can hit the same issue for any SCEV, or ValueTracking query
  // done during mutation. See PR49900.
  getOrCreateTripCount(OrigLoop->getLoopPreheader());

  // Create an empty vector loop, and prepare basic blocks for the runtime
  // checks.
  Loop *Lp = createVectorLoopSkeleton("");

  // Now, compare the new count to zero. If it is zero skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitMinimumIterationCountCheck(LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(LoopScalarPreHeader);

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(LoopScalarPreHeader);

  createHeaderBranch(Lp);

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues();

  return {completeLoopSkeleton(OrigLoopID), nullptr};
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock,
                                       BasicBlock *VectorHeader) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
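
      // (Worked example, hypothetical values: for an IV with Start = 0 and
      // Step = 2, the code below computes
      //   Escape = 0 + 2 * (CountRoundDown - 1),
      // the penultimate value described above.)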
      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");

      Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
                                    VectorHeader->getTerminator());
      Value *Escape =
          emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (Instruction &In : llvm::make_early_inc_range(*BB)) {
    if (!CSEDenseMapInfo::canHandle(&In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(&In)) {
      In.replaceAllUsesWith(V);
      In.eraseFromParent();
      continue;
    }

    CSEMap[&In] = &In;
  }
}

InstructionCost
LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
                                              bool &NeedToScalarize) const {
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->args())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of a scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  InstructionCost ScalarCallCost =
      TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the costs of unpacking the argument values for the scalar calls
  // and packing the return values into a vector.
  InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);

  InstructionCost Cost =
      ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  InstructionCost VectorCallCost =
      TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    Cost = VectorCallCost;
  }
  return Cost;
}

static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
  if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
    return Elt;
  return VectorType::get(Elt, VF);
}

InstructionCost
LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                   ElementCount VF) const {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");
  Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<const Value *> Arguments(CI->args());
  FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  SmallVector<Type *> ParamTys;
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

  IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
                                    dyn_cast<IntrinsicInst>(CI));
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
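  // (Illustrative sketch, assuming MinBWs records that an i32 add only needs
  // 8 bits and VF = 4: the <4 x i32> add is rewritten below roughly as
  //   %t0 = trunc <4 x i32> %a to <4 x i8>
  //   %t1 = trunc <4 x i32> %b to <4 x i8>
  //   %r  = add <4 x i8> %t0, %t1
  //   %x  = zext <4 x i8> %r to <4 x i32>
  // leaving any redundant ext/trunc pairs for InstCombine to remove.)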
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      auto *TruncatedTy = VectorType::get(
          ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));

        // Any wrapping introduced by shrinking this operation shouldn't be
        // considered undefined behavior. So, we can't unconditionally copy
        // arithmetic wrapping flags to NewI.
        cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 =
            cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 =
            cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
      } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements =
            cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements =
            cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        // If we don't know what to do, be conservative and don't do anything.
        continue;
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      State.reset(Def, Res, Part);
    }
  }

  // We'll have created a bunch of ZExts that are now dead. Clean them up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF.isVector())
    truncateToMinimalBitwidths(State);

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs(State);
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs(State);

  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
  // If we inserted an edge from the middle block to the unique exit block,
  // update uses outside the loop (phis) to account for the newly inserted
  // edge.
  if (!Cost->requiresScalarEpilogue(VF)) {
    // Fix-up external users of the induction variables.
    for (auto &Entry : Legal->getInductionVars())
      fixupIVUsers(Entry.first, Entry.second,
                   getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
                   IVEndValues[Entry.first], LoopMiddleBlock,
                   VectorLoop->getHeader());

    fixLCSSAPHIs(State);
  }

  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(VectorLoop->getHeader());

  // Set/update profile weights for the vector and remainder loops as the
  // original loop iterations are now distributed among them. Note that the
  // original loop, represented by LoopScalarBody, becomes the remainder loop
  // after vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
                               LI->getLoopFor(LoopScalarBody),
                               VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi
  // nodes. Phi nodes have cycles, so we need to vectorize them in two stages.
  // This is stage #2: We now need to fix the recurrences by adding incoming
  // edges to the currently empty PHI nodes. At this point every instruction
  // in the original loop is widened to a vector form so we can use them to
  // construct the incoming edges.
  VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(
    VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After execution completes the vector loop, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  VPValue *PreviousDef = PhiR->getBackedgeValue();
  Value *Incoming = State.get(PreviousDef, UF - 1);
  auto *ExtractForScalar = Incoming;
  auto *IdxTy = Builder.getInt32Ty();
  if (VF.isVector()) {
    auto *One = ConstantInt::get(IdxTy, 1);
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
                                                    "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the Phi is
  // used outside the loop. We need to extract the phi itself and not the
  // last element (the phi update in the current iteration). This will be the
  // value when jumping to the exit block from the LoopMiddleBlock, when the
  // scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector()) {
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Idx, "vector.recur.extract.for.phi");
  } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will
  // need either the last value of the scalar recurrence or the last value of
  // the vector recurrence we extracted in the middle block. Since the loop is
  // in LCSSA form, we just need to find all the phi nodes for the original
  // scalar recurrence in the exit block, and then add an edge for the middle
  // block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need to be updated.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(LoopExitInst);

  Type *PhiTy = OrigPhi->getType();
  BasicBlock *VectorLoopLatch =
      LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
  // If the tail is folded by masking, the vector value to leave the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, instead of the former. For an inloop reduction the
  // reduction will already be predicated, and does not need to be handled
  // here.
  if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at
      // no extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
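      // (E.g., on a target with predicated vector adds, keeping
      // select(mask, add(phi, x), phi) inside the loop can fold into a
      // single masked add; the TTI hook below asks the target whether that
      // is preferable. Illustrative rationale, not a guarantee.)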
      if (PreferPredicatedReductionSelect ||
          TTI->preferPredicatedReductionSelect(
              RdxDesc.getOpcode(), PhiTy,
              TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
        VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
      }
    }
  }

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
    assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = State.get(LoopExitInstDef, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                        : Builder.CreateZExt(Trunc, VecTy);
      for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
        if (U != Trunc) {
          U->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      State.reset(LoopExitInstDef, RdxParts[Part], Part);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
  unsigned Op = RecurrenceDescriptor::getOpcode(RK);

  // The middle block terminator has already been assigned a DebugLoc here
  // (the OrigLoop's single latch terminator). We want the whole middle block
  // to appear to execute on this line because: (a) it is all compiler
  // generated, (b) these instructions are always executed after evaluating
  // the latch conditional branch, and (c) other passes may add new
  // predecessors which terminate on this line. This is the easiest way to
  // ensure we don't accidentally cause an extra step back into the loop while
  // debugging.
  setDebugLocFromInst(LoopMiddleBlock->getTerminator());
  if (PhiR->isOrdered())
    ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
  else {
    // Floating-point operations should have some FMF to enable the reduction.
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
    for (unsigned Part = 1; Part < UF; ++Part) {
      Value *RdxPart = State.get(LoopExitInstDef, Part);
      if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
        ReducedPartRdx = Builder.CreateBinOp(
            (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
      } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
        ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
                                           ReducedPartRdx, RdxPart);
      else
        ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
    }
  }

  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
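  // (Illustration: for an integer add reduction with VF = 4,
  // createTargetReduction below typically emits something like
  //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %part.rdx)
  // to combine the lanes into a single scalar.)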
  if (VF.isVector() && !PhiR->isInLoop()) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  PHINode *ResumePhi =
      dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());

  // If we are fixing reductions in the epilogue loop then we should already
  // have created a bc.merge.rdx Phi after the main vector body. Ensure that
  // we carry over the incoming values correctly.
  for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
    if (Incoming == LoopMiddleBlock)
      BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
    else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
      BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
                              Incoming);
    else
      BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
  }

  // Set the resume value for this reduction.
  ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See the comment on the analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
        LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RK != RecurKind::Add && RK != RecurKind::Mul)
    return;

  Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
  assert(LoopExitInstr && "null loop exit instruction");
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(LoopExitInstr);
  Visited.insert(LoopExitInstr);

  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();
    if (isa<OverflowingBinaryOperator>(Cur))
      for (unsigned Part = 0; Part < UF; ++Part) {
        // FIXME: Should not rely on getVPValue at this point.
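        // (E.g., an 'add nuw nsw' feeding the reduction chain loses its
        // nuw/nsw below: reassociating the unrolled parts may introduce
        // intermediate wrapping that the scalar loop never had.)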
        Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
        cast<Instruction>(V)->dropPoisonGeneratingFlags();
      }

    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);
      if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
          Visited.insert(UI).second)
        Worklist.push_back(UI);
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
      // Some phis were already hand-updated by the reduction and recurrence
      // code above; leave them alone.
      continue;

    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // Non-instruction incoming values will have only one value.

    VPLane Lane = VPLane::getFirstLane();
    if (isa<Instruction>(IncomingValue) &&
        !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
                                           VF))
      Lane = VPLane::getLastLaneForVF(VF);

    // Can be a loop invariant incoming value or the last scalar value to be
    // extracted from the vectorized loop.
    // FIXME: Should not rely on getVPValue at this point.
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Value *lastIncomingValue =
        OrigLoop->isLoopInvariant(IncomingValue)
            ? IncomingValue
            : State.get(State.Plan->getVPValue(IncomingValue, true),
                        VPIteration(UF - 1, Lane));
    LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is not in the loop,
      // or may have side effects.
      if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
          I->mayHaveSideEffects())
        continue;

      // If the instruction is already in PredBB, check if we can sink its
      // operands. In that case, VPlan's sinkScalarOperands() succeeded in
      // sinking the scalar instruction I, hence it appears in PredBB; but it
      // may have failed to sink I's operands (recursively), which we try
      // (again) here.
      if (I->getParent() == PredBB) {
        Worklist.insert(I->op_begin(), I->op_end());
        continue;
      }

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    VPWidenPHIRecipe *VPPhi =
        cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
    PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
    // Make sure the builder has a valid insert point.
    Builder.SetInsertPoint(NewPhi);
    for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
      VPValue *Inc = VPPhi->getIncomingValue(i);
      VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
      NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
    }
  }
}

bool InnerLoopVectorizer::useOrderedReductions(
    const RecurrenceDescriptor &RdxDesc) {
  return Cost->useOrderedReductions(RdxDesc);
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
                                              VPWidenPHIRecipe *PhiR,
                                              VPTransformState &State) {
  assert(EnableVPlanNativePath &&
         "Non-native vplans are not expected to have VPWidenPHIRecipes.");
  // Currently we enter here in the VPlan-native path for non-induction
  // PHIs where all control flow is uniform. We simply widen these PHIs.
  // Create a vector phi with no operands - the vector phi operands will be
  // set at the end of vector code generation.
  Type *VecTy = (State.VF.isScalar())
                    ? PN->getType()
                    : VectorType::get(PN->getType(), State.VF);
  Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
  State.set(PhiR, VecPhi, 0);
  OrigPHIsToFix.push_back(cast<PHINode>(PN));
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
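/// For example (a sketch of the intent): 'x / y' with a loop-varying divisor
/// may divide by zero and must be predicated when executed under a mask,
/// whereas 'x / 7' can never trap and may be scalarized unpredicated.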
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
                                               VPUser &ArgOperands,
                                               VPTransformState &State) {
  assert(!isa<DbgInfoIntrinsic>(I) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  setDebugLocFromInst(&I);

  Module *M = I.getParent()->getParent()->getParent();
  auto *CI = cast<CallInst>(&I);

  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->args())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

  // This flag indicates whether we use an intrinsic or an ordinary call for
  // the vectorized version of the instruction, i.e. whether the intrinsic
  // call is more beneficial than a lib call (if one is available).
  bool NeedToScalarize = false;
  InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
  bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
  assert((UseVectorIntrinsic || !NeedToScalarize) &&
         "Instruction should be scalarized elsewhere.");
  assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
         "Either the intrinsic cost or vector call cost must be valid");

  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Type *, 2> TysForDecl = {CI->getType()};
    SmallVector<Value *, 4> Args;
    for (auto &I : enumerate(ArgOperands.operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
        Arg = State.get(I.value(), Part);
      else {
        Arg = State.get(I.value(), VPIteration(0, 0));
        if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
          TysForDecl.push_back(Arg->getType());
      }
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseVectorIntrinsic) {
      // Use vector version of the intrinsic.
      if (VF.isVector())
        TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
      VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
      // Use vector version of the function call.
      const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
#ifndef NDEBUG
      assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
             "Can't create vector function.");
#endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
}

void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  // This avoids any chances of creating a REPLICATE recipe during planning
  // since that would result in generation of scalarized code during execution,
  // which is not supported for scalable vectors.
  if (VF.isScalable()) {
    Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
    return;
  }

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
  auto *Latch = TheLoop->getLoopLatch();

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use will
  // be a scalar use and the pointer is only used by memory accesses, we place
  // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
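    // For instance, a getelementptr feeding only consecutive loads/stores
    // would land in ScalarPtrs, while one that also feeds a gather/scatter
    // (or a non-memory user) would land in PossibleNonScalarPtrs.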
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If tail-folding is applied, the primary induction variable will be used
    // to feed a vector compare.
    if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
      continue;

    // Returns true if \p Indvar is a pointer induction that is used directly
    // by load/store instruction \p I.
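    // For instance, a pointer induction used only as the address of
    // consecutive loads/stores (a scalar use) does not, by itself, force the
    // induction to be widened.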
    auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
                                              Instruction *I) {
      return Induction.second.getKind() ==
                 InductionDescriptor::IK_PtrInduction &&
             (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
             Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
    };

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             IsDirectLoadStoreFromPtrIndvar(Ind, I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(
    Instruction *I, ElementCount VF) const {
  if (!blockNeedsPredicationForAnyReason(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getLoadStoreType(I);
    Type *VTy = Ty;
    if (VF.isVector())
      VTy = VectorType::get(Ty, VF);
    const Align Alignment = getLoadStoreAlignment(I);
    return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
                                TTI.isLegalMaskedGather(VTy, Alignment))
                            : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
                                TTI.isLegalMaskedScatter(VTy, Alignment));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
    Instruction *I, ElementCount VF) {
  assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  assert(getWideningDecision(I, VF) == CM_Unknown &&
         "Decision should not be set yet.");
  auto *Group = getInterleavedAccessGroup(I);
  assert(Group && "Must have a group.");

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = getLoadStoreType(I);
  if (hasIrregularType(ScalarTy, DL))
    return false;

  // Check if masking is required.
  // A Group may need masking for one of two reasons: it resides in a block that
  // needs predication, or it was decided to use masking to deal with gaps
  // (either a gap at the end of a load-access that may result in a speculative
  // load, or any gaps in a store-access).
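  // As a sketch, a group accessing only A[3*i] and A[3*i+1] with factor 3
  // leaves a gap at A[3*i+2]: a wide load may speculatively read past the
  // last element, and a wide store would clobber the gap, unless the gap
  // lanes are masked off.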
  bool PredicatedAccessRequiresMasking =
      blockNeedsPredicationForAnyReason(I->getParent()) &&
      Legal->isMaskRequired(I);
  bool LoadAccessWithGapsRequiresEpilogMasking =
      isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
      !isScalarEpilogueAllowed();
  bool StoreAccessWithGapsRequiresMasking =
      isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
  if (!PredicatedAccessRequiresMasking &&
      !LoadAccessWithGapsRequiresEpilogMasking &&
      !StoreAccessWithGapsRequiresMasking)
    return true;

  // If masked interleaving is required, we expect that the user/target had
  // enabled it, because otherwise it either wouldn't have been created or
  // it should have been invalidated by the CostModel.
  assert(useMaskedInterleavedAccesses(TTI) &&
         "Masked interleave-groups for predicated accesses are not enabled.");

  if (Group->isReverse())
    return false;

  auto *Ty = getLoadStoreType(I);
  const Align Alignment = getLoadStoreAlignment(I);
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
                          : TTI.isLegalMaskedStore(Ty, Alignment);
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
    Instruction *I, ElementCount VF) {
  // Get and ensure we have a valid memory instruction.
  assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);
  auto *ScalarTy = getLoadStoreType(I);

  // First of all, in order to be widened the pointer must be consecutive.
  if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I, VF))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  if (hasIrregularType(ScalarTy, DL))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. If we do not find any uniform value, we will
  // not analyze it again; Uniforms.count(VF) will still return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of current loop are out of
  // scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  // Worklist containing uniform instructions demanding lane 0.
  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Add uniform instructions demanding lane 0 to the worklist. Instructions
  // that are scalar with predication must not be considered uniform after
  // vectorization, because that would create an erroneous replicating region
  // where only a single instance out of VF should be formed.
  // TODO: optimize such seldom cases if found important, see PR40816.
  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isOutOfScope(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
                        << *I << "\n");
      return;
    }
    if (isScalarWithPredication(I, VF)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
                        << *I << "\n");
      return;
    }
    LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
    Worklist.insert(I);
  };

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    // A uniform memory op is itself uniform. We exclude uniform stores
    // here as they demand the last lane, not the first one.
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
  SetVector<Value *> HasUniformUse;

  // Scan the loop for instructions which are either a) known to have only
  // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::sideeffect:
        case Intrinsic::experimental_noalias_scope_decl:
        case Intrinsic::assume:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
          if (TheLoop->hasLoopInvariantOperands(&I))
            addToWorklistIfAllowed(&I);
          break;
        default:
          break;
        }
      }

      // ExtractValue instructions must be uniform, because the operands are
      // known to be loop-invariant.
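      // (For example, extracting one member of a loop-invariant aggregate,
      // such as a struct returned by a call made outside the loop, yields
      // the same value on every iteration.)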
      if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
        assert(isOutOfScope(EVI->getAggregateOperand()) &&
               "Expected aggregate value to be loop invariant");
        addToWorklistIfAllowed(EVI);
        continue;
      }

      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside the Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getPredicate().isAlwaysTrue()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

ElementCount
LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
  if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
    return ElementCount::getScalable(0);

  if (Hints->isScalableVectorizationDisabled()) {
    reportVectorizationInfo("Scalable vectorization is explicitly disabled",
                            "ScalableVectorizationDisabled", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");

  auto MaxScalableVF = ElementCount::getScalable(
      std::numeric_limits<ElementCount::ScalarTy>::max());

  // Test that the loop-vectorizer can legalize all operations for this MaxVF.
  // FIXME: While for scalable vectors this is currently sufficient, this should
  // be replaced by a more detailed mechanism that filters out specific VFs,
  // instead of invalidating vectorization for a whole set of VFs based on the
  // MaxVF.

  // Disable scalable vectorization if the loop contains unsupported reductions.
  if (!canVectorizeReductions(MaxScalableVF)) {
    reportVectorizationInfo(
        "Scalable vectorization not supported for the reduction "
        "operations found in this loop.",
        "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  // Disable scalable vectorization if the loop contains any instructions
  // with element types not supported for scalable vectors.
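  // For example, a target whose scalable vectors support only i8/i16/i32/i64
  // and common FP element types (the exact set is target-defined) would
  // reject a loop operating on i128 values here.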
  if (any_of(ElementTypesInLoop, [&](Type *Ty) {
        return !Ty->isVoidTy() &&
               !this->TTI.isElementTypeLegalForScalableVector(Ty);
      })) {
    reportVectorizationInfo("Scalable vectorization is not supported "
                            "for all element types found in this loop.",
                            "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  if (Legal->isSafeForAnyVectorWidth())
    return MaxScalableVF;

  // Limit MaxScalableVF by the maximum safe dependence distance.
  Optional<unsigned> MaxVScale = TTI.getMaxVScale();
  if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
    MaxVScale =
        TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
  MaxScalableVF = ElementCount::getScalable(
      MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
  if (!MaxScalableVF)
    reportVectorizationInfo(
        "Max legal vector width too small, scalable vectorization "
        "unfeasible.",
        "ScalableVFUnfeasible", ORE, TheLoop);

  return MaxScalableVF;
}

FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
    unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeElements =
      PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);

  auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
  auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);

  LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
                    << ".\n");
  LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
                    << ".\n");

  // First analyze the UserVF, fall back if the UserVF should be ignored.
  if (UserVF) {
    auto MaxSafeUserVF =
        UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;

    if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
      // If `VF=vscale x N` is safe, then so is `VF=N`.
      if (UserVF.isScalable())
        return FixedScalableVFPair(
            ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
      else
        return UserVF;
    }

    assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));

    // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
    // is better to ignore the hint and let the compiler choose a suitable VF.
    if (!UserVF.isScalable()) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe, clamping to max safe VF="
                        << MaxSafeFixedVF << ".\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe, clamping to maximum safe vectorization factor "
               << ore::NV("VectorizationFactor", MaxSafeFixedVF);
      });
      return MaxSafeFixedVF;
    }

    if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is ignored because scalable vectors are not "
                           "available.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is ignored because the target does not support scalable "
                  "vectors. The compiler will pick a more suitable value.";
      });
    } else {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe. Ignoring scalable UserVF.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe. Ignoring the hint to let the compiler pick a "
                  "more suitable value.";
      });
    }
  }

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");

  FixedScalableVFPair Result(ElementCount::getFixed(1),
                             ElementCount::getScalable(0));
  if (auto MaxVF =
          getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
                                  MaxSafeFixedVF, FoldTailByMasking))
    Result.FixedVF = MaxVF;

  if (auto MaxVF =
          getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
                                  MaxSafeScalableVF, FoldTailByMasking))
    if (MaxVF.isScalable()) {
      Result.ScalableVF = MaxVF;
      LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
                        << "\n");
    }

  return Result;
}

FixedScalableVFPair
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since it's still likely to be
    // dynamically uniform if the target can skip.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF, false);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
    if (runtimeChecksRequired())
      return FixedScalableVFPair::getNone();

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to a vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF, false);
    }
    return FixedScalableVFPair::getNone();
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
  // FIXME: The condition below pessimizes the case for fixed-width vectors,
  // when scalable VFs are also candidates for vectorization.
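  // As a worked example (hypothetical values): with a known trip count of 128,
  // MaxFixedVF = 8 and no user-specified interleave count, the remainder
  // 128 urem 8 is 0, so the loop needs no tail and tail folding is skipped.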
  if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
    ElementCount MaxFixedVF = MaxFactors.FixedVF;
    assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
           "MaxFixedVF must be a power of 2");
    unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
                                   : MaxFixedVF.getFixedValue();
    ScalarEvolution *SE = PSE.getSE();
    const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
    const SCEV *ExitCount = SE->getAddExpr(
        BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
    const SCEV *Rem = SE->getURemExpr(
        SE->applyLoopGuards(ExitCount, TheLoop),
        SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
    if (Rem->isZero()) {
      // Accept MaxFixedVF if we do not have a tail.
      LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
      return MaxFactors;
    }
  }

  // For scalable vectors don't use tail folding for low trip counts or
  // optimizing for code size. We only permit this if the user has explicitly
  // requested it.
  if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
      ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
      MaxFactors.ScalableVF.isVector())
    MaxFactors.ScalableVF = ElementCount::getScalable(0);

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxFactors;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to a vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxFactors;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return FixedScalableVFPair::getNone();
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return FixedScalableVFPair::getNone();
}

ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
    unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
    const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
  bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
  TypeSize WidestRegister = TTI.getRegisterBitWidth(
      ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
                           : TargetTransformInfo::RGK_FixedWidthVector);

  // Convenience function to return the minimum of two ElementCounts.
  auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
    assert((LHS.isScalable() == RHS.isScalable()) &&
           "Scalable flags must match");
    return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
  };

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
  auto MaxVectorElementCount = ElementCount::get(
      PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
      ComputeScalableMaxVF);
  MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << (MaxVectorElementCount * WidestType) << " bits.\n");

  if (!MaxVectorElementCount) {
    LLVM_DEBUG(dbgs() << "LV: The target has no "
                      << (ComputeScalableMaxVF ? "scalable" : "fixed")
                      << " vector registers.\n");
    return ElementCount::getFixed(1);
  }

  const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
  if (ConstTripCount &&
      ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
      (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
    // If loop trip count (TC) is known at compile time there is no point in
    // choosing VF greater than TC (as done in the loop below). Select maximum
    // power of two which doesn't exceed TC.
    // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
    // when the TC is less than or equal to the known number of lanes.
    auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
                         "exceeding the constant trip count: "
                      << ClampedConstTripCount << "\n");
    return ElementCount::getFixed(ClampedConstTripCount);
  }

  ElementCount MaxVF = MaxVectorElementCount;
  if (TTI.shouldMaximizeVectorBandwidth() ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    auto MaxVectorElementCountMaxBW = ElementCount::get(
        PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
        ComputeScalableMaxVF);
    MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);

    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorElementCount).
    SmallVector<ElementCount, 8> VFs;
    for (ElementCount VS = MaxVectorElementCount * 2;
         ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
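    // E.g. (hypothetical usage numbers): with VFs = {8, 16, 32}, if VF=32
    // needs more vector registers than the target provides but VF=16 fits,
    // the loop below settles on VF=16.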
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (ElementCount MinVF =
            TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
      if (ElementCount::isKnownLT(MaxVF, MinVF)) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
  if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
    auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
    auto Min = Attr.getVScaleRangeMin();
    auto Max = Attr.getVScaleRangeMax();
    if (Max && Min == Max)
      return Max;
  }

  return TTI.getVScaleForTuning();
}

bool LoopVectorizationCostModel::isMoreProfitable(
    const VectorizationFactor &A, const VectorizationFactor &B) const {
  InstructionCost CostA = A.Cost;
  InstructionCost CostB = B.Cost;

  unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);

  if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
      MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly small)
    // constant, the trip count will be rounded up to an integer number of
    // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
    // which we compare directly. When not folding the tail, the total cost will
    // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the tripcount
    // as here.
    auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
    auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
    return RTCostA < RTCostB;
  }

  // Improve estimate for the vector width if it is scalable.
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
  if (Optional<unsigned> VScale = getVScaleForTuning()) {
    if (A.Width.isScalable())
      EstimatedWidthA *= VScale.getValue();
    if (B.Width.isScalable())
      EstimatedWidthB *= VScale.getValue();
  }

  // Assume vscale may be larger than 1 (or the value being tuned for),
  // so that scalable vectorization is slightly favorable over fixed-width
  // vectorization.
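  // Worked example (hypothetical costs): A = {vscale x 4, cost 10} vs.
  // B = {fixed 8, cost 12} with an estimated vscale of 2, so
  // EstimatedWidthA = 8:
  //   CostA * B.Width = 10 * 8 = 80  <=  CostB * EstimatedWidthA = 12 * 8 = 96
  // hence the scalable factor is preferred below.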
  if (A.Width.isScalable() && !B.Width.isScalable())
    return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);

  // To avoid the need for FP division:
  //   (CostA / A.Width) < (CostB / B.Width)
  //   <=> (CostA * B.Width) < (CostB * A.Width)
  return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
}

VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
    const ElementCountSet &VFCandidates) {
  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
  assert(VFCandidates.count(ElementCount::getFixed(1)) &&
         "Expected Scalar VF to be a candidate");

  const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
  VectorizationFactor ChosenFactor = ScalarCost;

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && VFCandidates.size() > 1) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    ChosenFactor.Cost = InstructionCost::getMax();
  }

  SmallVector<InstructionVFPair> InvalidCosts;
  for (const auto &i : VFCandidates) {
    // The cost for scalar VF=1 is already calculated, so ignore it.
    if (i.isScalar())
      continue;

    VectorizationCostTy C = expectedCost(i, &InvalidCosts);
    VectorizationFactor Candidate(i, C.first);

#ifndef NDEBUG
    unsigned AssumedMinimumVscale = 1;
    if (Optional<unsigned> VScale = getVScaleForTuning())
      AssumedMinimumVscale = VScale.getValue();
    unsigned Width =
        Candidate.Width.isScalable()
            ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
            : Candidate.Width.getFixedValue();
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (Candidate.Cost / Width));
    if (i.isScalable())
      LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
                        << AssumedMinimumVscale << ")");
    LLVM_DEBUG(dbgs() << ".\n");
#endif

    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
    if (isMoreProfitable(Candidate, ScalarCost))
      ProfitableVFs.push_back(Candidate);

    if (isMoreProfitable(Candidate, ChosenFactor))
      ChosenFactor = Candidate;
  }

  // Emit a report of VFs with invalid costs in the loop.
  if (!InvalidCosts.empty()) {
    // Group the remarks per instruction, keeping the instruction order from
    // InvalidCosts.
    std::map<Instruction *, unsigned> Numbering;
    unsigned I = 0;
    for (auto &Pair : InvalidCosts)
      if (!Numbering.count(Pair.first))
        Numbering[Pair.first] = I++;

    // Sort the list, first on instruction(number) then on VF.
    llvm::sort(InvalidCosts,
               [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
                 if (Numbering[A.first] != Numbering[B.first])
                   return Numbering[A.first] < Numbering[B.first];
                 ElementCountComparator ECC;
                 return ECC(A.second, B.second);
               });

    // For a list of ordered instruction-vf pairs:
    //   [(load, vf1), (load, vf2), (store, vf1)]
    // Group the instructions together to emit separate remarks for:
    //   load  (vf1, vf2)
    //   store (vf1)
    auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
    auto Subset = ArrayRef<InstructionVFPair>();
    do {
      if (Subset.empty())
        Subset = Tail.take_front(1);

      Instruction *I = Subset.front().first;

      // If the next instruction is different, or if there are no other pairs,
      // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //   remark: invalid costs for 'load' at VF=(vf1, vf2)
      if (Subset == Tail || Tail[Subset.size()].first != I) {
        std::string OutString;
        raw_string_ostream OS(OutString);
        assert(!Subset.empty() && "Unexpected empty range");
        OS << "Instruction with invalid costs prevented vectorization at VF=(";
        for (auto &Pair : Subset)
          OS << (Pair.second == Subset.front().second ? "" : ", ")
             << Pair.second;
        OS << "):";
        if (auto *CI = dyn_cast<CallInst>(I))
          OS << " call to " << CI->getCalledFunction()->getName();
        else
          OS << " " << I->getOpcodeName();
        OS.flush();
        reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
        Tail = Tail.drop_front(Subset.size());
        Subset = {};
      } else
        // Grow the subset by one element.
        Subset = Tail.take_front(Subset.size() + 1);
    } while (!Tail.empty());
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    ChosenFactor = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
                 ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
  return ChosenFactor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(),
             [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of the penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
  if (L.getExitingBlock() != L.getLoopLatch())
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  // FIXME: We should consider changing the threshold for scalable
  // vectors to take VScaleForTuning into account.
  if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
    if (LVP.hasPlanWithVF(ForcedEC))
      return {ForcedEC, 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
                         "this loop\n");
    return Result;
  }

  // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
  // the main loop handles 8 lanes per iteration. We could still benefit from
  // vectorizing the epilogue loop with VF=4.
  ElementCount EstimatedRuntimeVF = MainLoopVF;
  if (MainLoopVF.isScalable()) {
    EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
    if (Optional<unsigned> VScale = getVScaleForTuning())
      EstimatedRuntimeVF *= VScale.getValue();
  }

  for (auto &NextVF : ProfitableVFs)
    if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
          ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
         ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
        (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
        LVP.hasPlanWithVF(NextVF.Width))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  // For in-loop reductions, no element types are added to ElementTypesInLoop
  // if there are no loads/stores in the loop. In this case, check through the
  // reduction variables to determine the maximum width.
  if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
    // Reset MaxWidth so that we can find the smallest type used by recurrences
    // in the loop.
    MaxWidth = -1U;
    for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
      const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
      // When finding the min width used by the recurrence we need to account
      // for casts on the input operands of the recurrence.
      MaxWidth = std::min<unsigned>(
          MaxWidth, std::min<unsigned>(
                        RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
                        RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
    }
  } else {
    for (Type *T : ElementTypesInLoop) {
      MinWidth = std::min<unsigned>(
          MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
      MaxWidth = std::max<unsigned>(
          MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
    }
  }
  return {MinWidth, MaxWidth};
}

void LoopVectorizationCostModel::collectElementTypesForWidening() {
  ElementTypesInLoop.clear();
  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        const RecurrenceDescriptor &RdxDesc =
            Legal->getReductionVars().find(PN)->second;
        if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
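      // (For stores, the element type of interest is that of the stored
      // value, not of the pointer operand, as handled below.)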
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      assert(T->isSized() &&
             "Expected the load/store/recurrence type to be sized");

      ElementTypesInLoop.insert(T);
    }
  }
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // A finite maximum safe dependence distance already limits the number of
  // elements processed per iteration; interleaving would multiply that number
  // and could violate the dependence, so don't interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross iteration dependences for reductions.
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants, so we assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want power of two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want power of two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when OptForSize, in which case IC is set to 1 above.
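  //
  // As a hypothetical worked example (numbers assumed purely for
  // illustration): with 32 registers in a class, 2 of them tied up by
  // loop-invariant values, and at most 10 values live at once, the
  // computation below yields
  //   PowerOf2Floor((32 - 2) / 10) = PowerOf2Floor(3) = 2
  // interleaved instances.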
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, provided it is
  // at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0) {
    InstructionCost C = expectedCost(VF).first;
    assert(C.isValid() && "Expected to have chosen a VF with valid cost");
    LoopCost = *C.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // For any scalar loop that either requires runtime checks or predication we
  // are better off leaving this to the unroller. Note that if we've already
  // vectorized the loop we will have done the runtime check and so
  // interleaving won't require further checks.
  bool ScalarInterleavingRequiresPredication =
      (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
         return Legal->blockNeedsPredication(BB);
       }));
  bool ScalarInterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!ScalarInterleavingRequiresRuntimePointerCheck &&
      !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
    // We assume that the loop-overhead cost is 1; we use the cost model to
    // estimate the cost of the loop, and interleave until the overhead is
    // about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // There is little point in interleaving for reductions containing selects
    // and compares when VF=1 since it may just create more overhead than it's
    // worth for loops with small trip counts. This is because we still have to
    // do the final reduction after the loop.
    bool HasSelectCmpReductions =
        HasReductions &&
        any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
          const RecurrenceDescriptor &RdxDesc = Reduction.second;
          return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
              RdxDesc.getRecurrenceKind());
        });
    if (HasSelectCmpReductions) {
      LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
      return 1;
    }

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. For tree-wise reductions
    // set the limit to 2, and for ordered reductions it's best to disable
    // interleaving entirely.
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      bool HasOrderedReductions =
          any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
            const RecurrenceDescriptor &RdxDesc = Reduction.second;
            return RdxDesc.isOrdered();
          });
      if (HasOrderedReductions) {
        LLVM_DEBUG(
            dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
        return 1;
      }

      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal IC
      // to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi-map that holds the list of
  // intervals that *end* at a specific location. This multi-map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions that
  // are defined outside the loop, but are used inside the loop. We need this
  // number separately from the max-interval usage number because when we
  // unroll, loop-invariant values do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
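  //
  // For example (indices assumed purely for illustration), given the in-loop
  // chain
  //   %a = add ...        ; defined at #1, last used at #3
  //   %b = mul %a, ...    ; defined at #2, last used at #3
  //   %c = sub %a, %b     ; defined at #3
  // EndPoint maps %a -> 3 and %b -> 3, and both intervals are closed when the
  // linear scan below reaches instruction #3.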
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps an index to the instruction at that index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the instructions that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0;
    InstructionCost::CostType RegUsage =
        *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
    assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
           "Nonsensical values for register usage.");
    return RegUsage;
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // operator[] default-constructs missing entries to 0, so this also
          // handles the first occurrence of a register class.
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        // operator[] default-constructs missing entries to 0, so std::max
        // also covers the first time a register class is seen.
        MaxUsages[j][pair.first] =
            std::max(MaxUsages[j][pair.first], pair.second);
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto *Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
                                                           ElementCount VF) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked Load/Gather emulation was previously never allowed, while
  // a limited number of Masked Store/Scatter emulations were allowed.
  assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do.
  // Collection may already have occurred if we have a user-selected VF and are
  // now computing the expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better not to if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredicationForAnyReason(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I, VF)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply the discount if scalable, because that would lead to
        // invalid scalarization costs.
        // Do not apply the discount logic if the hacked cost is needed
        // for emulated masked memrefs.
        if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I, VF))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for.
    // This behavior can be changed by allowing getScalarValue to clone
    // the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider expressions
  // that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.find(I) != ScalarCosts.end())
      continue;

    // Compute the cost of the vector instruction. Note that this cost already
    // includes the scalarization overhead of the predicated instruction.
    InstructionCost VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost of
    // the instruction as if it wasn't if-converted and instead remained in the
    // predicated block. We will scale this cost by block probability after
    // computing the scalarization overhead.
    InstructionCost ScalarCost =
        VF.getFixedValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnes(VF.getFixedValue()), true, false);
      ScalarCost +=
          VF.getFixedValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand can
    // be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnes(VF.getFixedValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(
    ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (C.first.isValid() &&
          ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      // Keep a list of instructions with invalid costs.
      if (Invalid && !C.first.isValid())
        Invalid->emplace_back(&I, VF);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block, if it is an if-else block. Thus, scale the block's
    // cost by the probability of executing it. blockNeedsPredication from
    // Legal is used so as not to include all blocks in tail-folded loops.
    if (VF.isScalar() && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets the address access SCEV after verifying that the access pattern is
/// loop-invariant except for the induction-variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE,
    const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return PSE.getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

InstructionCost
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                        ElementCount VF) {
  assert(VF.isVector() &&
         "Scalarization cost of instruction implies vectorization.");
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto SE = PSE.getSE();

  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
  // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
  //       that it is being called from this specific place.
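
  // In rough terms, the total cost assembled below is:
  //   VF * (address computation + scalar memory op) + extract/insert overhead,
  // all scaled by the predicated-block probability when the access is
  // predicated.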

  // Figure out whether the access is strided and get the stride value, if it's
  // known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  InstructionCost Cost =
      VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  const Align Alignment = getLoadStoreAlignment(I);
  Cost += VF.getKnownMinValue() *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS, TTI::TCK_RecipThroughput);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated load/store, it will need extra i1 extracts and
  // conditional branches, but may not be executed for each vector lane. Scale
  // the cost by the probability of executing the predicated block.
  if (isPredicatedInst(I, VF)) {
    Cost /= getReciprocalPredBlockProb();

    // Add the cost of an i1 extract and a branch.
    auto *Vec_i1Ty =
        VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
    Cost += TTI.getScalarizationOverhead(
        Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
        /*Insert=*/false, /*Extract=*/true);
    Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);

    if (useEmulatedMaskMemRefHack(I, VF))
      // Artificially setting to a high enough value to practically disable
      // vectorization with such operations.
      Cost = 3000000;
  }

  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                    ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  Value *Ptr = getLoadStorePointerOperand(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  const Align Alignment = getLoadStoreAlignment(I);
  InstructionCost Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                      CostKind);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                CostKind, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                ElementCount VF) {
  assert(Legal->isUniformMemOp(*I));

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
                               CostKind) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
                             CostKind) +
         (isLoopInvariantStoreValue
              ? 0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in the interleaved group.
  SmallVector<unsigned, 4> Indices;
  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (Group->getMember(IF))
      Indices.push_back(IF);

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost +=
        Group->getNumMembers() *
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  }
  return Cost;
}

Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  using namespace llvm::PatternMatch;
  // Early exit for no in-loop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return None;
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for one of the following patterns, and for the minimal
  // acceptable cost of matching it:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains.
  // From there we find the pattern of mul/ext and test the cost of the entire
  // pattern vs the cost of the components. If the reduction cost is lower,
  // then we return it for the reduction instruction and 0 for the other
  // instructions in the pattern. If it is not, we return an invalid cost
  // specifying that the original cost method should be used.
  Instruction *RetI = I;
  if (match(RetI, m_ZExtOrSExt(m_Value()))) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }
  if (match(RetI, m_Mul(m_Value(), m_Value())) &&
      RetI->user_back()->getOpcode() == Instruction::Add) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }

  // Test if the found instruction is a reduction, and if not, return an
  // invalid cost specifying that the parent should use the original cost
  // modelling.
  if (!InLoopReductionImmediateChains.count(RetI))
    return None;

  // Find the reduction this chain is a part of and calculate the basic cost of
  // the reduction on its own.
  Instruction *LastChain = InLoopReductionImmediateChains[RetI];
  Instruction *ReductionPhi = LastChain;
  while (!isa<PHINode>(ReductionPhi))
    ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];

  const RecurrenceDescriptor &RdxDesc =
      Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;

  InstructionCost BaseCost = TTI.getArithmeticReductionCost(
      RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);

  // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
  // normal fmul instruction to the cost of the fadd reduction.
  if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
    BaseCost +=
        TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);

  // If we're using ordered reductions then we can just return the base cost
  // here, since getArithmeticReductionCost calculates the full ordered
  // reduction cost when FP reassociation is not allowed.
  if (useOrderedReductions(RdxDesc))
    return BaseCost;

  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
  Instruction *RedOp = RetI->getOperand(1) == LastChain
                           ? dyn_cast<Instruction>(RetI->getOperand(0))
                           : dyn_cast<Instruction>(RetI->getOperand(1));

  VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);

  Instruction *Op0, *Op1;
  if (RedOp &&
      match(RedOp,
            m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
      match(Op0, m_ZExtOrSExt(m_Value())) &&
      Op0->getOpcode() == Op1->getOpcode() &&
      Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
      !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
      (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {

    // Matched reduce(ext(mul(ext(A), ext(B))))
    // Note that the extend opcodes need to all match, or if A==B they will have
    // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
    // which is equally fine.
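    //
    // A typical source-level shape for this case (example assumed purely for
    // illustration) is a dot product such as
    //   acc += (int)(short)a[i] * (int)(short)b[i];
    // where a target with an extending multiply-accumulate reduction can
    // cover the ext, mul and reduce steps with one cheaper operation.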
    bool IsUnsigned = isa<ZExtInst>(Op0);
    auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
    auto *MulType = VectorType::get(Op0->getType(), VectorTy);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
                             TTI::CastContextHint::None, CostKind, Op0);
    InstructionCost MulCost =
        TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
    InstructionCost Ext2Cost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
                             TTI::CastContextHint::None, CostKind, RedOp);

    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    if (RedCost.isValid() &&
        RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
             !TheLoop->isLoopInvariant(RedOp)) {
    // Matched reduce(ext(A))
    bool IsUnsigned = isa<ZExtInst>(RedOp);
    auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
                             TTI::CastContextHint::None, CostKind, RedOp);
    if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp &&
             match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
    if (match(Op0, m_ZExtOrSExt(m_Value())) &&
        Op0->getOpcode() == Op1->getOpcode() &&
        !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
      bool IsUnsigned = isa<ZExtInst>(Op0);
      Type *Op0Ty = Op0->getOperand(0)->getType();
      Type *Op1Ty = Op1->getOperand(0)->getType();
      Type *LargestOpTy =
          Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
                                                                    : Op0Ty;
      auto *ExtType = VectorType::get(LargestOpTy, VectorTy);

      // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
      // different sizes. We take the largest type as the ext to reduce, and
      // add the remaining cost as, for example,
      // reduce(mul(ext(ext(A)), ext(B))).
      InstructionCost ExtCost0 = TTI.getCastInstrCost(
          Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
          TTI::CastContextHint::None, CostKind, Op0);
      InstructionCost ExtCost1 = TTI.getCastInstrCost(
          Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
          TTI::CastContextHint::None, CostKind, Op1);
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
          CostKind);
      InstructionCost ExtraExtCost = 0;
      if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
        Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
        ExtraExtCost = TTI.getCastInstrCost(
            ExtraExtOp->getOpcode(), ExtType,
            VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
            TTI::CastContextHint::None, CostKind, ExtraExtOp);
      }

      if (RedCost.isValid() &&
          (RedCost + ExtraExtCost) <
              (ExtCost0 + ExtCost1 + MulCost + BaseCost))
        return I == RetI ? RedCost : 0;
    } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
      // Matched reduce(mul())
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, /*IsUnsigned=*/true, RdxDesc.getRecurrenceType(),
          VectorTy, CostKind);

      if (RedCost.isValid() && RedCost < MulCost + BaseCost)
        return I == RetI ? RedCost : 0;
    }
  }

  return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
}

InstructionCost
LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                     ElementCount VF) {
  // Calculate the scalar cost only. The vectorization cost should be ready at
  // this moment.
  if (VF.isScalar()) {
    Type *ValTy = getLoadStoreType(I);
    const Align Alignment = getLoadStoreAlignment(I);
    unsigned AS = getLoadStoreAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
                               TTI::TCK_RecipThroughput, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                               ElementCount VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = ElementCount::getFixed(1);

  if (VF.isVector() && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.count(I))
      return VectorizationCostTy(
          (getInstructionCost(I, ElementCount::getFixed(1)).first *
           VF.getKnownMinValue()),
          false);
  }

  Type *VectorTy;
  InstructionCost C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized = false;
  if (VF.isVector() && VectorTy->isVectorTy()) {
    unsigned NumParts = TTI.getNumberOfParts(VectorTy);
    if (NumParts)
      TypeNotScalarized = NumParts < VF.getKnownMinValue();
    else
      C = InstructionCost::getInvalid();
  }
  return VectorizationCostTy(C, TypeNotScalarized);
}

InstructionCost
LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
                                                     ElementCount VF) const {

  // There is no mechanism yet to create a scalable scalarization loop,
  // so this is currently Invalid.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  if (VF.isScalar())
    return 0;

  InstructionCost Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(
        cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
        false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  // Some targets support efficient element stores.
  if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
    return Cost;

  // Collect operands to consider.
  CallInst *CI = dyn_cast<CallInst>(I);
  Instruction::op_range Ops = CI ? CI->args() : I->operands();

  // Skip operands that do not require extraction/scalarization and do not
  // incur any overhead.
  SmallVector<Type *> Tys;
  for (auto *V : filterExtractingOperands(Ops, VF))
    Tys.push_back(MaybeVectorizeType(V->getType(), VF));
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), Tys);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        InstructionCost Cost;
        if (isa<StoreInst>(&I) && VF.isScalable() &&
            isLegalGatherOrScatter(&I, VF)) {
          Cost = getGatherScatterCost(&I, VF);
          setWideningDecision(&I, VF, CM_GatherScatter, Cost);
        } else {
          assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
                 "Cannot yet scalarize uniform stores");
          Cost = getUniformMemOpCost(&I, VF);
          setWideningDecision(&I, VF, CM_Scalarize, Cost);
        }
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride = Legal->isConsecutivePtr(
            getLoadStoreType(&I), getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = InstructionCost::getInvalid();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I, VF)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the better solution for the current VF, record the decision,
      // and use it during vectorization.
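      // An invalid cost compares greater than any valid cost, so the
      // preference order below is: interleaving when it is no more expensive
      // than gather/scatter and strictly cheaper than scalarization;
      // otherwise gather/scatter when strictly cheaper than scalarization;
      // otherwise scalarization.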
6873       InstructionCost Cost;
6874       InstWidening Decision;
6875       if (InterleaveCost <= GatherScatterCost &&
6876           InterleaveCost < ScalarizationCost) {
6877         Decision = CM_Interleave;
6878         Cost = InterleaveCost;
6879       } else if (GatherScatterCost < ScalarizationCost) {
6880         Decision = CM_GatherScatter;
6881         Cost = GatherScatterCost;
6882       } else {
6883         Decision = CM_Scalarize;
6884         Cost = ScalarizationCost;
6885       }
6886       // If the instruction belongs to an interleave group, the whole group
6887       // receives the same decision. The whole group receives the cost, but
6888       // the cost will actually be assigned to one instruction.
6889       if (auto Group = getInterleavedAccessGroup(&I))
6890         setWideningDecision(Group, VF, Decision, Cost);
6891       else
6892         setWideningDecision(&I, VF, Decision, Cost);
6893     }
6894   }
6895 
6896   // Make sure that any load of address and any other address computation
6897   // remains scalar unless there is gather/scatter support. This avoids
6898   // inevitable extracts into address registers, and also has the benefit of
6899   // activating LSR more, since that pass can't optimize vectorized
6900   // addresses.
6901   if (TTI.prefersVectorizedAddressing())
6902     return;
6903 
6904   // Start with all scalar pointer uses.
6905   SmallPtrSet<Instruction *, 8> AddrDefs;
6906   for (BasicBlock *BB : TheLoop->blocks())
6907     for (Instruction &I : *BB) {
6908       Instruction *PtrDef =
6909           dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6910       if (PtrDef && TheLoop->contains(PtrDef) &&
6911           getWideningDecision(&I, VF) != CM_GatherScatter)
6912         AddrDefs.insert(PtrDef);
6913     }
6914 
6915   // Add all instructions used to generate the addresses.
6916   SmallVector<Instruction *, 4> Worklist;
6917   append_range(Worklist, AddrDefs);
6918   while (!Worklist.empty()) {
6919     Instruction *I = Worklist.pop_back_val();
6920     for (auto &Op : I->operands())
6921       if (auto *InstOp = dyn_cast<Instruction>(Op))
6922         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6923             AddrDefs.insert(InstOp).second)
6924           Worklist.push_back(InstOp);
6925   }
6926 
6927   for (auto *I : AddrDefs) {
6928     if (isa<LoadInst>(I)) {
6929       // Setting the desired widening decision should ideally be handled
6930       // by cost functions, but since this involves the task of finding out
6931       // if the loaded register is involved in an address computation, it is
6932       // instead changed here when we know this is the case.
6933       InstWidening Decision = getWideningDecision(I, VF);
6934       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6935         // Scalarize a widened load of address.
6936         setWideningDecision(
6937             I, VF, CM_Scalarize,
6938             (VF.getKnownMinValue() *
6939              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6940       else if (auto Group = getInterleavedAccessGroup(I)) {
6941         // Scalarize an interleave group of address loads.
6942         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6943           if (Instruction *Member = Group->getMember(I))
6944             setWideningDecision(
6945                 Member, VF, CM_Scalarize,
6946                 (VF.getKnownMinValue() *
6947                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6948         }
6949       }
6950     } else
6951       // Make sure I gets scalarized and a cost estimate without
6952       // scalarization overhead.
6953       ForcedScalars[VF].insert(I);
6954   }
6955 }
6956 
6957 InstructionCost
6958 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6959                                                Type *&VectorTy) {
6960   Type *RetTy = I->getType();
6961   if (canTruncateToMinimalBitwidth(I, VF))
6962     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6963   auto SE = PSE.getSE();
6964   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6965 
6966   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6967                                                 ElementCount VF) -> bool {
6968     if (VF.isScalar())
6969       return true;
6970 
6971     auto Scalarized = InstsToScalarize.find(VF);
6972     assert(Scalarized != InstsToScalarize.end() &&
6973            "VF not yet analyzed for scalarization profitability");
6974     return !Scalarized->second.count(I) &&
6975            llvm::all_of(I->users(), [&](User *U) {
6976              auto *UI = cast<Instruction>(U);
6977              return !Scalarized->second.count(UI);
6978            });
6979   };
6980   (void) hasSingleCopyAfterVectorization;
6981 
6982   if (isScalarAfterVectorization(I, VF)) {
6983     // With the exception of GEPs and PHIs, after scalarization there should
6984     // only be one copy of the instruction generated in the loop. This is
6985     // because the VF is either 1, or any instructions that need scalarizing
6986     // have already been dealt with by the time we get here. As a result,
6987     // we don't have to multiply the instruction cost by VF.
6988     assert(I->getOpcode() == Instruction::GetElementPtr ||
6989            I->getOpcode() == Instruction::PHI ||
6990            (I->getOpcode() == Instruction::BitCast &&
6991             I->getType()->isPointerTy()) ||
6992            hasSingleCopyAfterVectorization(I, VF));
6993     VectorTy = RetTy;
6994   } else
6995     VectorTy = ToVectorTy(RetTy, VF);
6996 
6997   // TODO: We need to estimate the cost of intrinsic calls.
6998   switch (I->getOpcode()) {
6999   case Instruction::GetElementPtr:
7000     // We mark this instruction as zero-cost because the cost of GEPs in
7001     // vectorized code depends on whether the corresponding memory instruction
7002     // is scalarized or not. Therefore, we handle GEPs with the memory
7003     // instruction cost.
7004     return 0;
7005   case Instruction::Br: {
7006     // In cases of scalarized and predicated instructions, there will be VF
7007     // predicated blocks in the vectorized loop. Each branch around these
7008     // blocks also requires an extract of its vector compare i1 element.
7009     bool ScalarPredicatedBB = false;
7010     BranchInst *BI = cast<BranchInst>(I);
7011     if (VF.isVector() && BI->isConditional() &&
7012         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7013          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7014       ScalarPredicatedBB = true;
7015 
7016     if (ScalarPredicatedBB) {
7017       // Not possible to scalarize a scalable vector with predicated instructions.
7018       if (VF.isScalable())
7019         return InstructionCost::getInvalid();
7020       // Return cost for branches around scalarized and predicated blocks.
7021       auto *Vec_i1Ty =
7022           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7023       return (
7024           TTI.getScalarizationOverhead(
7025               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7026           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7027     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7028       // The back-edge branch will remain, as will all scalar branches.
7029       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7030     else
7031       // This branch will be eliminated by if-conversion.
7032 return 0; 7033 // Note: We currently assume zero cost for an unconditional branch inside 7034 // a predicated block since it will become a fall-through, although we 7035 // may decide in the future to call TTI for all branches. 7036 } 7037 case Instruction::PHI: { 7038 auto *Phi = cast<PHINode>(I); 7039 7040 // First-order recurrences are replaced by vector shuffles inside the loop. 7041 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7042 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7043 return TTI.getShuffleCost( 7044 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7045 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7046 7047 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7048 // converted into select instructions. We require N - 1 selects per phi 7049 // node, where N is the number of incoming values. 7050 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7051 return (Phi->getNumIncomingValues() - 1) * 7052 TTI.getCmpSelInstrCost( 7053 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7054 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7055 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7056 7057 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7058 } 7059 case Instruction::UDiv: 7060 case Instruction::SDiv: 7061 case Instruction::URem: 7062 case Instruction::SRem: 7063 // If we have a predicated instruction, it may not be executed for each 7064 // vector lane. Get the scalarization cost and scale this amount by the 7065 // probability of executing the predicated block. If the instruction is not 7066 // predicated, we fall through to the next case. 7067 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7068 InstructionCost Cost = 0; 7069 7070 // These instructions have a non-void type, so account for the phi nodes 7071 // that we will create. This cost is likely to be zero. The phi node 7072 // cost, if any, should be scaled by the block probability because it 7073 // models a copy at the end of each predicated block. 7074 Cost += VF.getKnownMinValue() * 7075 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7076 7077 // The cost of the non-predicated instruction. 7078 Cost += VF.getKnownMinValue() * 7079 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7080 7081 // The cost of insertelement and extractelement instructions needed for 7082 // scalarization. 7083 Cost += getScalarizationOverhead(I, VF); 7084 7085 // Scale the cost by the probability of executing the predicated blocks. 7086 // This assumes the predicated block for each vector lane is equally 7087 // likely. 7088 return Cost / getReciprocalPredBlockProb(); 7089 } 7090 LLVM_FALLTHROUGH; 7091 case Instruction::Add: 7092 case Instruction::FAdd: 7093 case Instruction::Sub: 7094 case Instruction::FSub: 7095 case Instruction::Mul: 7096 case Instruction::FMul: 7097 case Instruction::FDiv: 7098 case Instruction::FRem: 7099 case Instruction::Shl: 7100 case Instruction::LShr: 7101 case Instruction::AShr: 7102 case Instruction::And: 7103 case Instruction::Or: 7104 case Instruction::Xor: { 7105 // Since we will replace the stride by 1 the multiplication should go away. 
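    // For example (a sketch): when the loop is versioned on "Stride == 1", an
    // address computation such as "%offset = mul i64 %i, %Stride" simplifies
    // to plain %i, so the multiply is treated as free here.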
7106     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7107       return 0;
7108 
7109     // Detect reduction patterns
7110     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7111       return *RedCost;
7112 
7113     // Certain instructions can be cheaper to vectorize if they have a constant
7114     // second vector operand. One example of this is shifts on x86.
7115     Value *Op2 = I->getOperand(1);
7116     TargetTransformInfo::OperandValueProperties Op2VP;
7117     TargetTransformInfo::OperandValueKind Op2VK =
7118         TTI.getOperandInfo(Op2, Op2VP);
7119     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7120       Op2VK = TargetTransformInfo::OK_UniformValue;
7121 
7122     SmallVector<const Value *, 4> Operands(I->operand_values());
7123     return TTI.getArithmeticInstrCost(
7124         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7125         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7126   }
7127   case Instruction::FNeg: {
7128     return TTI.getArithmeticInstrCost(
7129         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7130         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7131         TargetTransformInfo::OP_None, I->getOperand(0), I);
7132   }
7133   case Instruction::Select: {
7134     SelectInst *SI = cast<SelectInst>(I);
7135     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7136     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7137 
7138     const Value *Op0, *Op1;
7139     using namespace llvm::PatternMatch;
7140     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7141                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7142       // select x, y, false --> x & y
7143       // select x, true, y --> x | y
7144       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7145       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7146       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7147       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7148       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7149              Op1->getType()->getScalarSizeInBits() == 1);
7150 
7151       SmallVector<const Value *, 2> Operands{Op0, Op1};
7152       return TTI.getArithmeticInstrCost(
7153           match(I, m_LogicalOr()) ?
Instruction::Or : Instruction::And, VectorTy, 7154 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7155 } 7156 7157 Type *CondTy = SI->getCondition()->getType(); 7158 if (!ScalarCond) 7159 CondTy = VectorType::get(CondTy, VF); 7160 7161 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7162 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7163 Pred = Cmp->getPredicate(); 7164 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7165 CostKind, I); 7166 } 7167 case Instruction::ICmp: 7168 case Instruction::FCmp: { 7169 Type *ValTy = I->getOperand(0)->getType(); 7170 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7171 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7172 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7173 VectorTy = ToVectorTy(ValTy, VF); 7174 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7175 cast<CmpInst>(I)->getPredicate(), CostKind, 7176 I); 7177 } 7178 case Instruction::Store: 7179 case Instruction::Load: { 7180 ElementCount Width = VF; 7181 if (Width.isVector()) { 7182 InstWidening Decision = getWideningDecision(I, Width); 7183 assert(Decision != CM_Unknown && 7184 "CM decision should be taken at this point"); 7185 if (Decision == CM_Scalarize) 7186 Width = ElementCount::getFixed(1); 7187 } 7188 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7189 return getMemoryInstructionCost(I, VF); 7190 } 7191 case Instruction::BitCast: 7192 if (I->getType()->isPointerTy()) 7193 return 0; 7194 LLVM_FALLTHROUGH; 7195 case Instruction::ZExt: 7196 case Instruction::SExt: 7197 case Instruction::FPToUI: 7198 case Instruction::FPToSI: 7199 case Instruction::FPExt: 7200 case Instruction::PtrToInt: 7201 case Instruction::IntToPtr: 7202 case Instruction::SIToFP: 7203 case Instruction::UIToFP: 7204 case Instruction::Trunc: 7205 case Instruction::FPTrunc: { 7206 // Computes the CastContextHint from a Load/Store instruction. 7207 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7208 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7209 "Expected a load or a store!"); 7210 7211 if (VF.isScalar() || !TheLoop->contains(I)) 7212 return TTI::CastContextHint::Normal; 7213 7214 switch (getWideningDecision(I, VF)) { 7215 case LoopVectorizationCostModel::CM_GatherScatter: 7216 return TTI::CastContextHint::GatherScatter; 7217 case LoopVectorizationCostModel::CM_Interleave: 7218 return TTI::CastContextHint::Interleave; 7219 case LoopVectorizationCostModel::CM_Scalarize: 7220 case LoopVectorizationCostModel::CM_Widen: 7221 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7222 : TTI::CastContextHint::Normal; 7223 case LoopVectorizationCostModel::CM_Widen_Reverse: 7224 return TTI::CastContextHint::Reversed; 7225 case LoopVectorizationCostModel::CM_Unknown: 7226 llvm_unreachable("Instr did not go through cost modelling?"); 7227 } 7228 7229 llvm_unreachable("Unhandled case!"); 7230 }; 7231 7232 unsigned Opcode = I->getOpcode(); 7233 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7234 // For Trunc, the context is the only user, which must be a StoreInst. 7235 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7236 if (I->hasOneUse()) 7237 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7238 CCH = ComputeCCH(Store); 7239 } 7240 // For Z/Sext, the context is the operand, which must be a LoadInst. 
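    // (FPExt takes the same path below: a load feeding the extend provides
    // the cast context.)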
7241     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7242              Opcode == Instruction::FPExt) {
7243       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7244         CCH = ComputeCCH(Load);
7245     }
7246 
7247     // We optimize the truncation of induction variables having constant
7248     // integer steps. The cost of these truncations is the same as the scalar
7249     // operation.
7250     if (isOptimizableIVTruncate(I, VF)) {
7251       auto *Trunc = cast<TruncInst>(I);
7252       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7253                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7254     }
7255 
7256     // Detect reduction patterns
7257     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7258       return *RedCost;
7259 
7260     Type *SrcScalarTy = I->getOperand(0)->getType();
7261     Type *SrcVecTy =
7262         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7263     if (canTruncateToMinimalBitwidth(I, VF)) {
7264       // This cast is going to be shrunk. This may remove the cast or it might
7265       // turn it into a slightly different cast. For example, if MinBW == 16,
7266       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7267       //
7268       // Calculate the modified src and dest types.
7269       Type *MinVecTy = VectorTy;
7270       if (Opcode == Instruction::Trunc) {
7271         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7272         VectorTy =
7273             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7274       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7275         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7276         VectorTy =
7277             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7278       }
7279     }
7280 
7281     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7282   }
7283   case Instruction::Call: {
7284     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7285       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7286         return *RedCost;
7287     bool NeedToScalarize;
7288     CallInst *CI = cast<CallInst>(I);
7289     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7290     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7291       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7292       return std::min(CallCost, IntrinsicCost);
7293     }
7294     return CallCost;
7295   }
7296   case Instruction::ExtractValue:
7297     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7298   case Instruction::Alloca:
7299     // We cannot easily widen alloca to a scalable alloca, as
7300     // the result would need to be a vector of pointers.
7301     if (VF.isScalable())
7302       return InstructionCost::getInvalid();
7303     LLVM_FALLTHROUGH;
7304   default:
7305     // This opcode is unknown. Assume that it is the same as 'mul'.
7306     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7307   } // end of switch.
7308 } 7309 7310 char LoopVectorize::ID = 0; 7311 7312 static const char lv_name[] = "Loop Vectorization"; 7313 7314 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7315 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7316 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7317 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7318 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7319 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7320 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7321 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7322 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7323 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7324 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7325 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7326 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7327 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7328 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7329 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7330 7331 namespace llvm { 7332 7333 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7334 7335 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7336 bool VectorizeOnlyWhenForced) { 7337 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7338 } 7339 7340 } // end namespace llvm 7341 7342 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7343 // Check if the pointer operand of a load or store instruction is 7344 // consecutive. 7345 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7346 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7347 return false; 7348 } 7349 7350 void LoopVectorizationCostModel::collectValuesToIgnore() { 7351 // Ignore ephemeral values. 7352 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7353 7354 // Ignore type-promoting instructions we identified during reduction 7355 // detection. 7356 for (auto &Reduction : Legal->getReductionVars()) { 7357 const RecurrenceDescriptor &RedDes = Reduction.second; 7358 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7359 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7360 } 7361 // Ignore type-casting instructions we identified during induction 7362 // detection. 7363 for (auto &Induction : Legal->getInductionVars()) { 7364 const InductionDescriptor &IndDes = Induction.second; 7365 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7366 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7367 } 7368 } 7369 7370 void LoopVectorizationCostModel::collectInLoopReductions() { 7371 for (auto &Reduction : Legal->getReductionVars()) { 7372 PHINode *Phi = Reduction.first; 7373 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7374 7375 // We don't collect reductions that are type promoted (yet). 7376 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7377 continue; 7378 7379 // If the target would prefer this reduction to happen "in-loop", then we 7380 // want to record it as such. 7381 unsigned Opcode = RdxDesc.getOpcode(); 7382 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7383 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7384 TargetTransformInfo::ReductionFlags())) 7385 continue; 7386 7387 // Check that we can correctly put the reductions into the loop, by 7388 // finding the chain of operations that leads from the phi to the loop 7389 // exit value. 
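    // For example (a sketch): for "sum += a[i]" the chain is the single add
    // feeding the phi across the backedge; if no such closed chain can be
    // found, getReductionOpChain returns an empty vector and the reduction
    // stays out-of-loop.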
7390     SmallVector<Instruction *, 4> ReductionOperations =
7391         RdxDesc.getReductionOpChain(Phi, TheLoop);
7392     bool InLoop = !ReductionOperations.empty();
7393     if (InLoop) {
7394       InLoopReductionChains[Phi] = ReductionOperations;
7395       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7396       Instruction *LastChain = Phi;
7397       for (auto *I : ReductionOperations) {
7398         InLoopReductionImmediateChains[I] = LastChain;
7399         LastChain = I;
7400       }
7401     }
7402     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7403                       << " reduction for phi: " << *Phi << "\n");
7404   }
7405 }
7406 
7407 // TODO: we could return a pair of values that specify the max VF and
7408 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7409 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7410 // doesn't have a cost model that can choose which plan to execute if
7411 // more than one is generated.
7412 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7413                                  LoopVectorizationCostModel &CM) {
7414   unsigned WidestType;
7415   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7416   return WidestVectorRegBits / WidestType;
7417 }
7418 
7419 VectorizationFactor
7420 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7421   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7422   ElementCount VF = UserVF;
7423   // Outer loop handling: they may require CFG and instruction level
7424   // transformations before even evaluating whether vectorization is profitable.
7425   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7426   // the vectorization pipeline.
7427   if (!OrigLoop->isInnermost()) {
7428     // If the user doesn't provide a vectorization factor, determine a
7429     // reasonable one.
7430     if (UserVF.isZero()) {
7431       VF = ElementCount::getFixed(determineVPlanVF(
7432           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7433               .getFixedSize(),
7434           CM));
7435       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7436 
7437       // Make sure we have a VF > 1 for stress testing.
7438       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7439         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7440                           << "overriding computed VF.\n");
7441         VF = ElementCount::getFixed(4);
7442       }
7443     }
7444     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7445     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7446            "VF needs to be a power of two");
7447     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7448                       << "VF " << VF << " to build VPlans.\n");
7449     buildVPlans(VF, VF);
7450 
7451     // For VPlan build stress testing, we bail out after VPlan construction.
7452     if (VPlanBuildStressTest)
7453       return VectorizationFactor::Disabled();
7454 
7455     return {VF, 0 /*Cost*/};
7456   }
7457 
7458   LLVM_DEBUG(
7459       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7460                 "VPlan-native path.\n");
7461   return VectorizationFactor::Disabled();
7462 }
7463 
7464 Optional<VectorizationFactor>
7465 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7466   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7467   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7468   if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7469     return None;
7470 
7471   // Invalidate interleave groups if all blocks of loop will be predicated.
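  // (When the tail is folded by masking, every block is predicated; without
  // masked-interleaved support the groups cannot be emitted as masked wide
  // loads/stores, so any decisions based on them would be invalid.)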
7472 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7473 !useMaskedInterleavedAccesses(*TTI)) { 7474 LLVM_DEBUG( 7475 dbgs() 7476 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7477 "which requires masked-interleaved support.\n"); 7478 if (CM.InterleaveInfo.invalidateGroups()) 7479 // Invalidating interleave groups also requires invalidating all decisions 7480 // based on them, which includes widening decisions and uniform and scalar 7481 // values. 7482 CM.invalidateCostModelingDecisions(); 7483 } 7484 7485 ElementCount MaxUserVF = 7486 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7487 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7488 if (!UserVF.isZero() && UserVFIsLegal) { 7489 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7490 "VF needs to be a power of two"); 7491 // Collect the instructions (and their associated costs) that will be more 7492 // profitable to scalarize. 7493 if (CM.selectUserVectorizationFactor(UserVF)) { 7494 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7495 CM.collectInLoopReductions(); 7496 buildVPlansWithVPRecipes(UserVF, UserVF); 7497 LLVM_DEBUG(printPlans(dbgs())); 7498 return {{UserVF, 0}}; 7499 } else 7500 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7501 "InvalidCost", ORE, OrigLoop); 7502 } 7503 7504 // Populate the set of Vectorization Factor Candidates. 7505 ElementCountSet VFCandidates; 7506 for (auto VF = ElementCount::getFixed(1); 7507 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7508 VFCandidates.insert(VF); 7509 for (auto VF = ElementCount::getScalable(1); 7510 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7511 VFCandidates.insert(VF); 7512 7513 for (const auto &VF : VFCandidates) { 7514 // Collect Uniform and Scalar instructions after vectorization with VF. 7515 CM.collectUniformsAndScalars(VF); 7516 7517 // Collect the instructions (and their associated costs) that will be more 7518 // profitable to scalarize. 7519 if (VF.isVector()) 7520 CM.collectInstsToScalarize(VF); 7521 } 7522 7523 CM.collectInLoopReductions(); 7524 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7525 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7526 7527 LLVM_DEBUG(printPlans(dbgs())); 7528 if (!MaxFactors.hasVector()) 7529 return VectorizationFactor::Disabled(); 7530 7531 // Select the optimal vectorization factor. 7532 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7533 7534 // Check if it is profitable to vectorize with runtime checks. 
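  // (A sketch of the policy below: vectorization is abandoned when the number
  // of runtime pointer checks exceeds the pragma threshold, or exceeds the
  // default threshold while reordering of memory operations has not been
  // explicitly allowed.)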
7535   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7536   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7537     bool PragmaThresholdReached =
7538         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7539     bool ThresholdReached =
7540         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7541     if ((ThresholdReached && !Hints.allowReordering()) ||
7542         PragmaThresholdReached) {
7543       ORE->emit([&]() {
7544         return OptimizationRemarkAnalysisAliasing(
7545                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7546                    OrigLoop->getHeader())
7547                << "loop not vectorized: cannot prove it is safe to reorder "
7548                   "memory operations";
7549       });
7550       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7551       Hints.emitRemarkWithHints();
7552       return VectorizationFactor::Disabled();
7553     }
7554   }
7555   return SelectedVF;
7556 }
7557 
7558 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7559   assert(count_if(VPlans,
7560                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7561              1 &&
7562          "Best VF does not have a single VPlan.");
7563 
7564   for (const VPlanPtr &Plan : VPlans) {
7565     if (Plan->hasVF(VF))
7566       return *Plan.get();
7567   }
7568   llvm_unreachable("No plan found!");
7569 }
7570 
7571 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7572   SmallVector<Metadata *, 4> MDs;
7573   // Reserve first location for self-reference to the LoopID metadata node.
7574   MDs.push_back(nullptr);
7575   bool IsUnrollMetadata = false;
7576   MDNode *LoopID = L->getLoopID();
7577   if (LoopID) {
7578     // First find existing loop unrolling disable metadata.
7579     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7580       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7581       if (MD) {
7582         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7583         IsUnrollMetadata =
7584             S && S->getString().startswith("llvm.loop.unroll.disable");
7585       }
7586       MDs.push_back(LoopID->getOperand(i));
7587     }
7588   }
7589 
7590   if (!IsUnrollMetadata) {
7591     // Add runtime unroll disable metadata.
7592     LLVMContext &Context = L->getHeader()->getContext();
7593     SmallVector<Metadata *, 1> DisableOperands;
7594     DisableOperands.push_back(
7595         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7596     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7597     MDs.push_back(DisableNode);
7598     MDNode *NewLoopID = MDNode::get(Context, MDs);
7599     // Set operand 0 to refer to the loop id itself.
7600     NewLoopID->replaceOperandWith(0, NewLoopID);
7601     L->setLoopID(NewLoopID);
7602   }
7603 }
7604 
7605 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7606                                            VPlan &BestVPlan,
7607                                            InnerLoopVectorizer &ILV,
7608                                            DominatorTree *DT) {
7609   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
7610                     << ", UF=" << BestUF << '\n');
7611 
7612   // Perform the actual loop transformation.
7613 
7614   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7615   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7616   Value *CanonicalIVStartValue;
7617   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7618       ILV.createVectorizedLoopSkeleton();
7619   ILV.collectPoisonGeneratingRecipes(State);
7620 
7621   ILV.printDebugTracesAtStart();
7622 
7623   //===------------------------------------------------===//
7624   //
7625   // Notice: any optimization or new instruction that goes
7626   // into the code below should also be implemented in
7627   // the cost-model.
7628   //
7629   //===------------------------------------------------===//
7630 
7631   // 2. Copy and widen instructions from the old loop into the new loop.
7632   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7633                              ILV.getOrCreateVectorTripCount(nullptr),
7634                              CanonicalIVStartValue, State);
7635   BestVPlan.execute(&State);
7636 
7637   // Keep all loop hints from the original loop on the vector loop (we'll
7638   // replace the vectorizer-specific hints below).
7639   MDNode *OrigLoopID = OrigLoop->getLoopID();
7640 
7641   Optional<MDNode *> VectorizedLoopID =
7642       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7643                                       LLVMLoopVectorizeFollowupVectorized});
7644 
7645   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7646   if (VectorizedLoopID.hasValue())
7647     L->setLoopID(VectorizedLoopID.getValue());
7648   else {
7649     // Keep all loop hints from the original loop on the vector loop (we'll
7650     // replace the vectorizer-specific hints below).
7651     if (MDNode *LID = OrigLoop->getLoopID())
7652       L->setLoopID(LID);
7653 
7654     LoopVectorizeHints Hints(L, true, *ORE);
7655     Hints.setAlreadyVectorized();
7656   }
7657   // Disable runtime unrolling when vectorizing the epilogue loop.
7658   if (CanonicalIVStartValue)
7659     AddRuntimeUnrollDisableMetaData(L);
7660 
7661   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7662   // predication, updating analyses.
7663   ILV.fixVectorizedLoop(State);
7664 
7665   ILV.printDebugTracesAtEnd();
7666 }
7667 
7668 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7669 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7670   for (const auto &Plan : VPlans)
7671     if (PrintVPlansInDotFormat)
7672       Plan->printDOT(O);
7673     else
7674       Plan->print(O);
7675 }
7676 #endif
7677 
7678 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7679     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7680 
7681   // We create new control-flow for the vectorized loop, so the original exit
7682   // conditions will be dead after vectorization if they're only used by the
7683   // terminator.
7684   SmallVector<BasicBlock*> ExitingBlocks;
7685   OrigLoop->getExitingBlocks(ExitingBlocks);
7686   for (auto *BB : ExitingBlocks) {
7687     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7688     if (!Cmp || !Cmp->hasOneUse())
7689       continue;
7690 
7691     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7692     if (!DeadInstructions.insert(Cmp).second)
7693       continue;
7694 
7695     // An operand of the icmp is often a dead trunc, used by IndUpdate.
7696     // TODO: can recurse through operands in general
7697     for (Value *Op : Cmp->operands()) {
7698       if (isa<TruncInst>(Op) && Op->hasOneUse())
7699         DeadInstructions.insert(cast<Instruction>(Op));
7700     }
7701   }
7702 
7703   // We create new "steps" for induction variable updates to which the original
7704   // induction variables map. An original update instruction will be dead if
7705   // all its users except the induction variable are dead.
7706   auto *Latch = OrigLoop->getLoopLatch();
7707   for (auto &Induction : Legal->getInductionVars()) {
7708     PHINode *Ind = Induction.first;
7709     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7710 
7711     // If the tail is to be folded by masking, the primary induction variable,
7712     // if it exists, isn't dead: it will be used for masking. Don't kill it.
7713     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7714       continue;
7715 
7716     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7717           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7718         }))
7719       DeadInstructions.insert(IndUpdate);
7720   }
7721 }
7722 
7723 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7724 
7725 //===--------------------------------------------------------------------===//
7726 // EpilogueVectorizerMainLoop
7727 //===--------------------------------------------------------------------===//
7728 
7729 /// This function is partially responsible for generating the control flow
7730 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7731 std::pair<BasicBlock *, Value *>
7732 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7733   MDNode *OrigLoopID = OrigLoop->getLoopID();
7734   Loop *Lp = createVectorLoopSkeleton("");
7735 
7736   // Generate the code to check the minimum iteration count of the vector
7737   // epilogue (see below).
7738   EPI.EpilogueIterationCountCheck =
7739       emitMinimumIterationCountCheck(LoopScalarPreHeader, true);
7740   EPI.EpilogueIterationCountCheck->setName("iter.check");
7741 
7742   // Generate the code to check any assumptions that we've made for SCEV
7743   // expressions.
7744   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7745 
7746   // Generate the code that checks at runtime if arrays overlap. We put the
7747   // checks into a separate block to make the more common case of few elements
7748   // faster.
7749   EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader);
7750 
7751   // Generate the iteration count check for the main loop, *after* the check
7752   // for the epilogue loop, so that the path-length is shorter for the case
7753   // that goes directly through the vector epilogue. The longer-path length for
7754   // the main loop is compensated for by the gain from vectorizing the larger
7755   // trip count. Note: the branch will get updated later on when we vectorize
7756   // the epilogue.
7757   EPI.MainLoopIterationCountCheck =
7758       emitMinimumIterationCountCheck(LoopScalarPreHeader, false);
7759 
7760   // Generate the induction variable.
7761   Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader);
7762   EPI.VectorTripCount = CountRoundDown;
7763   createHeaderBranch(Lp);
7764 
7765   // Skip induction resume value creation here because the resume values will
7766   // be created in the second pass. If we created them here, they wouldn't be
7767   // used anyway, because the vplan in the second pass still contains the
7768   // inductions from the original loop.
7769 
7770   return {completeLoopSkeleton(OrigLoopID), nullptr};
7771 }
7772 
7773 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7774   LLVM_DEBUG({
7775     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7776            << "Main Loop VF:" << EPI.MainLoopVF
7777            << ", Main Loop UF:" << EPI.MainLoopUF
7778            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7779            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7780   });
7781 }
7782 
7783 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7784   DEBUG_WITH_TYPE(VerboseDebug, {
7785     dbgs() << "intermediate fn:\n"
7786            << *OrigLoop->getHeader()->getParent() << "\n";
7787   });
7788 }
7789 
7790 BasicBlock *
7791 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass,
7792                                                            bool ForEpilogue) {
7793   assert(Bypass && "Expected valid bypass basic block.");
7794   ElementCount VFactor = ForEpilogue ?
EPI.EpilogueVF : VF; 7795 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7796 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7797 // Reuse existing vector loop preheader for TC checks. 7798 // Note that new preheader block is generated for vector loop. 7799 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7800 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7801 7802 // Generate code to check if the loop's trip count is less than VF * UF of the 7803 // main vector loop. 7804 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7805 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7806 7807 Value *CheckMinIters = Builder.CreateICmp( 7808 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7809 "min.iters.check"); 7810 7811 if (!ForEpilogue) 7812 TCCheckBlock->setName("vector.main.loop.iter.check"); 7813 7814 // Create new preheader for vector loop. 7815 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7816 DT, LI, nullptr, "vector.ph"); 7817 7818 if (ForEpilogue) { 7819 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7820 DT->getNode(Bypass)->getIDom()) && 7821 "TC check is expected to dominate Bypass"); 7822 7823 // Update dominator for Bypass & LoopExit. 7824 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7825 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7826 // For loops with multiple exits, there's no edge from the middle block 7827 // to exit blocks (as the epilogue must run) and thus no need to update 7828 // the immediate dominator of the exit blocks. 7829 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7830 7831 LoopBypassBlocks.push_back(TCCheckBlock); 7832 7833 // Save the trip count so we don't have to regenerate it in the 7834 // vec.epilog.iter.check. This is safe to do because the trip count 7835 // generated here dominates the vector epilog iter check. 7836 EPI.TripCount = Count; 7837 } 7838 7839 ReplaceInstWithInst( 7840 TCCheckBlock->getTerminator(), 7841 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7842 7843 return TCCheckBlock; 7844 } 7845 7846 //===--------------------------------------------------------------------===// 7847 // EpilogueVectorizerEpilogueLoop 7848 //===--------------------------------------------------------------------===// 7849 7850 /// This function is partially responsible for generating the control flow 7851 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7852 std::pair<BasicBlock *, Value *> 7853 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7854 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7855 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 7856 7857 // Now, compare the remaining count and if there aren't enough iterations to 7858 // execute the vectorized epilogue skip to the scalar part. 7859 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7860 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7861 LoopVectorPreHeader = 7862 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7863 LI, nullptr, "vec.epilog.ph"); 7864 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7865 VecEpilogueIterationCountCheck); 7866 7867 // Adjust the control flow taking the state info from the main loop 7868 // vectorization into account. 
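  // (Sketch of the rewiring below: the main loop's iteration-count check now
  // branches directly to the epilogue's new preheader, while the checks saved
  // from the first pass are redirected to the scalar preheader.)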
7869 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7870 "expected this to be saved from the previous pass."); 7871 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7872 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7873 7874 DT->changeImmediateDominator(LoopVectorPreHeader, 7875 EPI.MainLoopIterationCountCheck); 7876 7877 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7878 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7879 7880 if (EPI.SCEVSafetyCheck) 7881 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7882 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7883 if (EPI.MemSafetyCheck) 7884 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7885 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7886 7887 DT->changeImmediateDominator( 7888 VecEpilogueIterationCountCheck, 7889 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7890 7891 DT->changeImmediateDominator(LoopScalarPreHeader, 7892 EPI.EpilogueIterationCountCheck); 7893 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7894 // If there is an epilogue which must run, there's no edge from the 7895 // middle block to exit blocks and thus no need to update the immediate 7896 // dominator of the exit blocks. 7897 DT->changeImmediateDominator(LoopExitBlock, 7898 EPI.EpilogueIterationCountCheck); 7899 7900 // Keep track of bypass blocks, as they feed start values to the induction 7901 // phis in the scalar loop preheader. 7902 if (EPI.SCEVSafetyCheck) 7903 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7904 if (EPI.MemSafetyCheck) 7905 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7906 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7907 7908 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7909 // merge control-flow from the latch block and the middle block. Update the 7910 // incoming values here and move the Phi into the preheader. 7911 SmallVector<PHINode *, 4> PhisInBlock; 7912 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis()) 7913 PhisInBlock.push_back(&Phi); 7914 7915 for (PHINode *Phi : PhisInBlock) { 7916 Phi->replaceIncomingBlockWith( 7917 VecEpilogueIterationCountCheck->getSinglePredecessor(), 7918 VecEpilogueIterationCountCheck); 7919 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); 7920 if (EPI.SCEVSafetyCheck) 7921 Phi->removeIncomingValue(EPI.SCEVSafetyCheck); 7922 if (EPI.MemSafetyCheck) 7923 Phi->removeIncomingValue(EPI.MemSafetyCheck); 7924 Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI()); 7925 } 7926 7927 // Generate a resume induction for the vector epilogue and put it in the 7928 // vector epilogue preheader 7929 Type *IdxTy = Legal->getWidestInductionType(); 7930 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 7931 LoopVectorPreHeader->getFirstNonPHI()); 7932 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 7933 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 7934 EPI.MainLoopIterationCountCheck); 7935 7936 // Generate the induction variable. 7937 createHeaderBranch(Lp); 7938 7939 // Generate induction resume values. These variables save the new starting 7940 // indexes for the scalar loop. They are used to test if there are any tail 7941 // iterations left once the vector loop has completed. 
7942   // Note that when the vectorized epilogue is skipped due to the iteration
7943   // count check, the resume value for the induction variable comes from
7944   // the trip count of the main vector loop, hence passing the AdditionalBypass
7945   // argument.
7946   createInductionResumeValues({VecEpilogueIterationCountCheck,
7947                                EPI.VectorTripCount} /* AdditionalBypass */);
7948 
7949   return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7950 }
7951 
7952 BasicBlock *
7953 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7954     BasicBlock *Bypass, BasicBlock *Insert) {
7955 
7956   assert(EPI.TripCount &&
7957          "Expected trip count to have been saved in the first pass.");
7958   assert(
7959       (!isa<Instruction>(EPI.TripCount) ||
7960        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7961       "saved trip count does not dominate insertion point.");
7962   Value *TC = EPI.TripCount;
7963   IRBuilder<> Builder(Insert->getTerminator());
7964   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7965 
7966   // Generate code to check if the loop's trip count is less than VF * UF of the
7967   // vector epilogue loop.
7968   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7969       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7970 
7971   Value *CheckMinIters =
7972       Builder.CreateICmp(P, Count,
7973                          createStepForVF(Builder, Count->getType(),
7974                                          EPI.EpilogueVF, EPI.EpilogueUF),
7975                          "min.epilog.iters.check");
7976 
7977   ReplaceInstWithInst(
7978       Insert->getTerminator(),
7979       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7980 
7981   LoopBypassBlocks.push_back(Insert);
7982   return Insert;
7983 }
7984 
7985 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7986   LLVM_DEBUG({
7987     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7988            << "Epilogue Loop VF:" << EPI.EpilogueVF
7989            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7990   });
7991 }
7992 
7993 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7994   DEBUG_WITH_TYPE(VerboseDebug, {
7995     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7996   });
7997 }
7998 
7999 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8000     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8001   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8002   bool PredicateAtRangeStart = Predicate(Range.Start);
8003 
8004   for (ElementCount TmpVF = Range.Start * 2;
8005        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8006     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8007       Range.End = TmpVF;
8008       break;
8009     }
8010 
8011   return PredicateAtRangeStart;
8012 }
8013 
8014 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8015 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8016 /// of VF's starting at a given VF and extending it as much as possible. Each
8017 /// vectorization decision can potentially shorten this sub-range during
8018 /// buildVPlan().
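/// For example (illustrative): with MinVF=1 and MaxVF=8, one call to
/// buildVPlan may produce a plan covering {1,2} and a second call a plan
/// covering {4,8}.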
8019 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8020 ElementCount MaxVF) { 8021 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8022 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8023 VFRange SubRange = {VF, MaxVFPlusOne}; 8024 VPlans.push_back(buildVPlan(SubRange)); 8025 VF = SubRange.End; 8026 } 8027 } 8028 8029 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8030 VPlanPtr &Plan) { 8031 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8032 8033 // Look for cached value. 8034 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8035 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8036 if (ECEntryIt != EdgeMaskCache.end()) 8037 return ECEntryIt->second; 8038 8039 VPValue *SrcMask = createBlockInMask(Src, Plan); 8040 8041 // The terminator has to be a branch inst! 8042 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8043 assert(BI && "Unexpected terminator found"); 8044 8045 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8046 return EdgeMaskCache[Edge] = SrcMask; 8047 8048 // If source is an exiting block, we know the exit edge is dynamically dead 8049 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8050 // adding uses of an otherwise potentially dead instruction. 8051 if (OrigLoop->isLoopExiting(Src)) 8052 return EdgeMaskCache[Edge] = SrcMask; 8053 8054 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8055 assert(EdgeMask && "No Edge Mask found for condition"); 8056 8057 if (BI->getSuccessor(0) != Dst) 8058 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8059 8060 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8061 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8062 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8063 // The select version does not introduce new UB if SrcMask is false and 8064 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8065 VPValue *False = Plan->getOrAddVPValue( 8066 ConstantInt::getFalse(BI->getCondition()->getType())); 8067 EdgeMask = 8068 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8069 } 8070 8071 return EdgeMaskCache[Edge] = EdgeMask; 8072 } 8073 8074 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8075 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8076 8077 // Look for cached value. 8078 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8079 if (BCEntryIt != BlockMaskCache.end()) 8080 return BCEntryIt->second; 8081 8082 // All-one mask is modelled as no-mask following the convention for masked 8083 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8084 VPValue *BlockMask = nullptr; 8085 8086 if (OrigLoop->getHeader() == BB) { 8087 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8088 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8089 8090 // Introduce the early-exit compare IV <= BTC to form header block mask. 8091 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8092 // constructing the desired canonical IV in the header block as its first 8093 // non-phi instructions. 
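    // For example (a sketch): with trip count 10 and VF=4, BTC is 9; in the
    // third iteration the widened IV is <8,9,10,11> and IV <= BTC yields
    // <1,1,0,0>, masking off the two out-of-range lanes.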
8094 assert(CM.foldTailByMasking() && "must fold the tail"); 8095 VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock(); 8096 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8097 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8098 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8099 8100 VPBuilder::InsertPointGuard Guard(Builder); 8101 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8102 if (CM.TTI.emitGetActiveLaneMask()) { 8103 VPValue *TC = Plan->getOrCreateTripCount(); 8104 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8105 } else { 8106 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8107 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8108 } 8109 return BlockMaskCache[BB] = BlockMask; 8110 } 8111 8112 // This is the block mask. We OR all incoming edges. 8113 for (auto *Predecessor : predecessors(BB)) { 8114 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8115 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8116 return BlockMaskCache[BB] = EdgeMask; 8117 8118 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8119 BlockMask = EdgeMask; 8120 continue; 8121 } 8122 8123 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8124 } 8125 8126 return BlockMaskCache[BB] = BlockMask; 8127 } 8128 8129 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8130 ArrayRef<VPValue *> Operands, 8131 VFRange &Range, 8132 VPlanPtr &Plan) { 8133 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8134 "Must be called with either a load or store"); 8135 8136 auto willWiden = [&](ElementCount VF) -> bool { 8137 if (VF.isScalar()) 8138 return false; 8139 LoopVectorizationCostModel::InstWidening Decision = 8140 CM.getWideningDecision(I, VF); 8141 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8142 "CM decision should be taken at this point."); 8143 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8144 return true; 8145 if (CM.isScalarAfterVectorization(I, VF) || 8146 CM.isProfitableToScalarize(I, VF)) 8147 return false; 8148 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8149 }; 8150 8151 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8152 return nullptr; 8153 8154 VPValue *Mask = nullptr; 8155 if (Legal->isMaskRequired(I)) 8156 Mask = createBlockInMask(I->getParent(), Plan); 8157 8158 // Determine if the pointer operand of the access is either consecutive or 8159 // reverse consecutive. 8160 LoopVectorizationCostModel::InstWidening Decision = 8161 CM.getWideningDecision(I, Range.Start); 8162 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8163 bool Consecutive = 8164 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8165 8166 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8167 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8168 Consecutive, Reverse); 8169 8170 StoreInst *Store = cast<StoreInst>(I); 8171 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8172 Mask, Consecutive, Reverse); 8173 } 8174 8175 static VPWidenIntOrFpInductionRecipe * 8176 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8177 VPValue *Start, const InductionDescriptor &IndDesc, 8178 LoopVectorizationCostModel &CM, ScalarEvolution &SE, 8179 Loop &OrigLoop, VFRange &Range) { 8180 // Returns true if an instruction \p I should be scalarized instead of 8181 // vectorized for the chosen vectorization factor. 
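  // (The two getDecisionAndClampRange queries below clamp the VF range so
  // that "a scalar IV is needed" and "only a scalar IV is needed" each have
  // one consistent answer across the whole clamped range.)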
8182 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8183 return CM.isScalarAfterVectorization(I, VF) || 8184 CM.isProfitableToScalarize(I, VF); 8185 }; 8186 8187 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8188 [&](ElementCount VF) { 8189 // Returns true if we should generate a scalar version of \p IV. 8190 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8191 return true; 8192 auto isScalarInst = [&](User *U) -> bool { 8193 auto *I = cast<Instruction>(U); 8194 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8195 }; 8196 return any_of(PhiOrTrunc->users(), isScalarInst); 8197 }, 8198 Range); 8199 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8200 [&](ElementCount VF) { 8201 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8202 }, 8203 Range); 8204 assert(IndDesc.getStartValue() == 8205 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8206 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8207 "step must be loop invariant"); 8208 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8209 return new VPWidenIntOrFpInductionRecipe( 8210 Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE); 8211 } 8212 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8213 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8214 !NeedsScalarIVOnly, SE); 8215 } 8216 8217 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8218 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8219 8220 // Check if this is an integer or fp induction. If so, build the recipe that 8221 // produces its scalar and vector values. 8222 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8223 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, 8224 *PSE.getSE(), *OrigLoop, Range); 8225 8226 // Check if this is pointer induction. If so, build the recipe for it. 8227 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8228 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8229 *PSE.getSE()); 8230 return nullptr; 8231 } 8232 8233 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8234 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8235 VPlan &Plan) const { 8236 // Optimize the special case where the source is a constant integer 8237 // induction variable. Notice that we can only optimize the 'trunc' case 8238 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8239 // (c) other casts depend on pointer size. 8240 8241 // Determine whether \p K is a truncation based on an induction variable that 8242 // can be optimized. 
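  // For example (a sketch): "%t = trunc i64 %iv to i32" over an IV with a
  // constant step can be emitted directly as a 32-bit wide induction instead
  // of widening the i64 IV and truncating every lane.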
8243 auto isOptimizableIVTruncate = 8244 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8245 return [=](ElementCount VF) -> bool { 8246 return CM.isOptimizableIVTruncate(K, VF); 8247 }; 8248 }; 8249 8250 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8251 isOptimizableIVTruncate(I), Range)) { 8252 8253 auto *Phi = cast<PHINode>(I->getOperand(0)); 8254 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8255 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8256 return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(), 8257 *OrigLoop, Range); 8258 } 8259 return nullptr; 8260 } 8261 8262 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8263 ArrayRef<VPValue *> Operands, 8264 VPlanPtr &Plan) { 8265 // If all incoming values are equal, the incoming VPValue can be used directly 8266 // instead of creating a new VPBlendRecipe. 8267 VPValue *FirstIncoming = Operands[0]; 8268 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8269 return FirstIncoming == Inc; 8270 })) { 8271 return Operands[0]; 8272 } 8273 8274 unsigned NumIncoming = Phi->getNumIncomingValues(); 8275 // For in-loop reductions, we do not need to create an additional select. 8276 VPValue *InLoopVal = nullptr; 8277 for (unsigned In = 0; In < NumIncoming; In++) { 8278 PHINode *PhiOp = 8279 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8280 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8281 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8282 InLoopVal = Operands[In]; 8283 } 8284 } 8285 8286 assert((!InLoopVal || NumIncoming == 2) && 8287 "Found an in-loop reduction for PHI with unexpected number of " 8288 "incoming values"); 8289 if (InLoopVal) 8290 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8291 8292 // We know that all PHIs in non-header blocks are converted into selects, so 8293 // we don't have to worry about the insertion order and we can just use the 8294 // builder. At this point we generate the predication tree. There may be 8295 // duplications since this is a simple recursive scan, but future 8296 // optimizations will clean it up. 8297 SmallVector<VPValue *, 2> OperandsWithMask; 8298 8299 for (unsigned In = 0; In < NumIncoming; In++) { 8300 VPValue *EdgeMask = 8301 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8302 assert((EdgeMask || NumIncoming == 1) && 8303 "Multiple predecessors with one having a full mask"); 8304 OperandsWithMask.push_back(Operands[In]); 8305 if (EdgeMask) 8306 OperandsWithMask.push_back(EdgeMask); 8307 } 8308 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8309 } 8310 8311 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8312 ArrayRef<VPValue *> Operands, 8313 VFRange &Range) const { 8314 8315 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8316 [this, CI](ElementCount VF) { 8317 return CM.isScalarWithPredication(CI, VF); 8318 }, 8319 Range); 8320 8321 if (IsPredicated) 8322 return nullptr; 8323 8324 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8325 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8326 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8327 ID == Intrinsic::pseudoprobe || 8328 ID == Intrinsic::experimental_noalias_scope_decl)) 8329 return nullptr; 8330 8331 auto willWiden = [&](ElementCount VF) -> bool { 8332 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8333 // The following case may be scalarized depending on the VF. 
8334 // The flag indicates whether we use an intrinsic or a regular call for the
8335 // vectorized version of the instruction, i.e. whether it is more
8336 // profitable to emit an intrinsic call than a library call.
8337 bool NeedToScalarize = false;
8338 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8339 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8340 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8341 return UseVectorIntrinsic || !NeedToScalarize;
8342 };
8343
8344 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8345 return nullptr;
8346
8347 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8348 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8349 }
8350
8351 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8352 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8353 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8354 // The instruction should be widened, unless it is scalar after
8355 // vectorization, scalarization is profitable, or it is predicated.
8356 auto WillScalarize = [this, I](ElementCount VF) -> bool {
8357 return CM.isScalarAfterVectorization(I, VF) ||
8358 CM.isProfitableToScalarize(I, VF) ||
8359 CM.isScalarWithPredication(I, VF);
8360 };
8361 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8362 Range);
8363 }
8364
8365 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8366 ArrayRef<VPValue *> Operands) const {
8367 auto IsVectorizableOpcode = [](unsigned Opcode) {
8368 switch (Opcode) {
8369 case Instruction::Add:
8370 case Instruction::And:
8371 case Instruction::AShr:
8372 case Instruction::BitCast:
8373 case Instruction::FAdd:
8374 case Instruction::FCmp:
8375 case Instruction::FDiv:
8376 case Instruction::FMul:
8377 case Instruction::FNeg:
8378 case Instruction::FPExt:
8379 case Instruction::FPToSI:
8380 case Instruction::FPToUI:
8381 case Instruction::FPTrunc:
8382 case Instruction::FRem:
8383 case Instruction::FSub:
8384 case Instruction::ICmp:
8385 case Instruction::IntToPtr:
8386 case Instruction::LShr:
8387 case Instruction::Mul:
8388 case Instruction::Or:
8389 case Instruction::PtrToInt:
8390 case Instruction::SDiv:
8391 case Instruction::Select:
8392 case Instruction::SExt:
8393 case Instruction::Shl:
8394 case Instruction::SIToFP:
8395 case Instruction::SRem:
8396 case Instruction::Sub:
8397 case Instruction::Trunc:
8398 case Instruction::UDiv:
8399 case Instruction::UIToFP:
8400 case Instruction::URem:
8401 case Instruction::Xor:
8402 case Instruction::ZExt:
8403 return true;
8404 }
8405 return false;
8406 };
8407
8408 if (!IsVectorizableOpcode(I->getOpcode()))
8409 return nullptr;
8410
8411 // Success: widen this instruction.
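// The corresponding VPWidenRecipe::execute (further below) will emit one
// wide IR instruction per unroll part for this ingredient.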
8412 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8413 }
8414
8415 void VPRecipeBuilder::fixHeaderPhis() {
8416 BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8417 for (VPHeaderPHIRecipe *R : PhisToFix) {
8418 auto *PN = cast<PHINode>(R->getUnderlyingValue());
8419 VPRecipeBase *IncR =
8420 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8421 R->addOperand(IncR->getVPSingleValue());
8422 }
8423 }
8424
8425 VPBasicBlock *VPRecipeBuilder::handleReplication(
8426 Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8427 VPlanPtr &Plan) {
8428 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8429 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8430 Range);
8431
8432 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8433 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8434 Range);
8435
8436 // Even if the instruction is not marked as uniform, there are certain
8437 // intrinsic calls that can be effectively treated as such, so we check for
8438 // them here. Conservatively, we only do this for scalable vectors, since
8439 // for fixed-width VFs we can always fall back on full scalarization.
8440 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8441 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8442 case Intrinsic::assume:
8443 case Intrinsic::lifetime_start:
8444 case Intrinsic::lifetime_end:
8445 // For scalable vectors if one of the operands is variant then we still
8446 // want to mark it as uniform, which will generate one instruction for
8447 // just the first lane of the vector. We can't scalarize the call in the
8448 // same way as for fixed-width vectors because we don't know how many
8449 // lanes there are.
8450 //
8451 // The reasons for doing it this way for scalable vectors are:
8452 // 1. For the assume intrinsic generating the instruction for the first
8453 // lane is still better than not generating any at all. For
8454 // example, the input may be a splat across all lanes.
8455 // 2. For the lifetime start/end intrinsics the pointer operand only
8456 // does anything useful when the input comes from a stack object,
8457 // which suggests it should always be uniform. For non-stack objects
8458 // the effect is to poison the object, which still allows us to
8459 // remove the call.
8460 IsUniform = true;
8461 break;
8462 default:
8463 break;
8464 }
8465 }
8466
8467 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8468 IsUniform, IsPredicated);
8469 setRecipe(I, Recipe);
8470 Plan->addVPValue(I, Recipe);
8471
8472 // Find if I uses a predicated instruction. If so, it will use its scalar
8473 // value. Avoid hoisting the insert-element which packs the scalar value into
8474 // a vector value, as that happens iff all users use the vector value.
8475 for (VPValue *Op : Recipe->operands()) {
8476 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8477 if (!PredR)
8478 continue;
8479 auto *RepR =
8480 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8481 assert(RepR->isPredicated() &&
8482 "expected Replicate recipe to be predicated");
8483 RepR->setAlsoPack(false);
8484 }
8485
8486 // Finalize the recipe for Instr, first if it is not predicated.
8487 if (!IsPredicated) { 8488 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8489 VPBB->appendRecipe(Recipe); 8490 return VPBB; 8491 } 8492 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8493 8494 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8495 assert(SingleSucc && "VPBB must have a single successor when handling " 8496 "predicated replication."); 8497 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8498 // Record predicated instructions for above packing optimizations. 8499 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8500 VPBlockUtils::insertBlockAfter(Region, VPBB); 8501 auto *RegSucc = new VPBasicBlock(); 8502 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8503 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8504 return RegSucc; 8505 } 8506 8507 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8508 VPRecipeBase *PredRecipe, 8509 VPlanPtr &Plan) { 8510 // Instructions marked for predication are replicated and placed under an 8511 // if-then construct to prevent side-effects. 8512 8513 // Generate recipes to compute the block mask for this region. 8514 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8515 8516 // Build the triangular if-then region. 8517 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8518 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8519 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8520 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8521 auto *PHIRecipe = Instr->getType()->isVoidTy() 8522 ? nullptr 8523 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8524 if (PHIRecipe) { 8525 Plan->removeVPValueFor(Instr); 8526 Plan->addVPValue(Instr, PHIRecipe); 8527 } 8528 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8529 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8530 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8531 8532 // Note: first set Entry as region entry and then connect successors starting 8533 // from it in order, to propagate the "parent" of each VPBasicBlock. 8534 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8535 VPBlockUtils::connectBlocks(Pred, Exit); 8536 8537 return Region; 8538 } 8539 8540 VPRecipeOrVPValueTy 8541 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8542 ArrayRef<VPValue *> Operands, 8543 VFRange &Range, VPlanPtr &Plan) { 8544 // First, check for specific widening recipes that deal with calls, memory 8545 // operations, inductions and Phi nodes. 
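// If none of the specific recipes below match, the instruction falls
// through to generic widening (shouldWiden/tryToWiden) or, failing that,
// replication.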
8546 if (auto *CI = dyn_cast<CallInst>(Instr)) 8547 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8548 8549 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8550 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8551 8552 VPRecipeBase *Recipe; 8553 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8554 if (Phi->getParent() != OrigLoop->getHeader()) 8555 return tryToBlend(Phi, Operands, Plan); 8556 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8557 return toVPRecipeResult(Recipe); 8558 8559 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8560 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8561 VPValue *StartV = Operands[0]; 8562 if (Legal->isReductionVariable(Phi)) { 8563 const RecurrenceDescriptor &RdxDesc = 8564 Legal->getReductionVars().find(Phi)->second; 8565 assert(RdxDesc.getRecurrenceStartValue() == 8566 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8567 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8568 CM.isInLoopReduction(Phi), 8569 CM.useOrderedReductions(RdxDesc)); 8570 } else { 8571 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8572 } 8573 8574 // Record the incoming value from the backedge, so we can add the incoming 8575 // value from the backedge after all recipes have been created. 8576 recordRecipeOf(cast<Instruction>( 8577 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8578 PhisToFix.push_back(PhiRecipe); 8579 } else { 8580 // TODO: record backedge value for remaining pointer induction phis. 8581 assert(Phi->getType()->isPointerTy() && 8582 "only pointer phis should be handled here"); 8583 assert(Legal->getInductionVars().count(Phi) && 8584 "Not an induction variable"); 8585 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8586 VPValue *Start = Plan->getOrAddVPValue(II.getStartValue()); 8587 PhiRecipe = new VPWidenPHIRecipe(Phi, Start); 8588 } 8589 8590 return toVPRecipeResult(PhiRecipe); 8591 } 8592 8593 if (isa<TruncInst>(Instr) && 8594 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8595 Range, *Plan))) 8596 return toVPRecipeResult(Recipe); 8597 8598 if (!shouldWiden(Instr, Range)) 8599 return nullptr; 8600 8601 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8602 return toVPRecipeResult(new VPWidenGEPRecipe( 8603 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8604 8605 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8606 bool InvariantCond = 8607 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8608 return toVPRecipeResult(new VPWidenSelectRecipe( 8609 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8610 } 8611 8612 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8613 } 8614 8615 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8616 ElementCount MaxVF) { 8617 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8618 8619 // Collect instructions from the original loop that will become trivially dead 8620 // in the vectorized loop. We don't need to vectorize these instructions. For 8621 // example, original induction update instructions can become dead because we 8622 // separately emit induction "steps" when generating code for the new loop. 8623 // Similarly, we create a new latch condition when setting up the structure 8624 // of the new loop, so the old one can become dead. 
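// For example, in 'for (i = 0; i < n; i++) a[i] = b[i];' both the scalar
// increment of i and the latch compare are recreated by the vectorizer, so
// the original instructions need no recipes.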
8625 SmallPtrSet<Instruction *, 4> DeadInstructions;
8626 collectTriviallyDeadInstructions(DeadInstructions);
8627
8628 // Add assume instructions we need to drop to DeadInstructions, to prevent
8629 // them from being added to the VPlan.
8630 // TODO: We only need to drop assumes in blocks that get flattened. If the
8631 // control flow is preserved, we should keep them.
8632 auto &ConditionalAssumes = Legal->getConditionalAssumes();
8633 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8634
8635 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8636 // Dead instructions do not need sinking. Remove them from SinkAfter.
8637 for (Instruction *I : DeadInstructions)
8638 SinkAfter.erase(I);
8639
8640 // Cannot sink instructions after dead instructions (there won't be any
8641 // recipes for them). Instead, find the first non-dead previous instruction.
8642 for (auto &P : Legal->getSinkAfter()) {
8643 Instruction *SinkTarget = P.second;
8644 Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8645 (void)FirstInst;
8646 while (DeadInstructions.contains(SinkTarget)) {
8647 assert(
8648 SinkTarget != FirstInst &&
8649 "Must find a live instruction (at least the one feeding the "
8650 "first-order recurrence PHI) before reaching beginning of the block");
8651 SinkTarget = SinkTarget->getPrevNode();
8652 assert(SinkTarget != P.first &&
8653 "sink source equals target, no sinking required");
8654 }
8655 P.second = SinkTarget;
8656 }
8657
8658 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8659 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8660 VFRange SubRange = {VF, MaxVFPlusOne};
8661 VPlans.push_back(
8662 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8663 VF = SubRange.End;
8664 }
8665 }
8666
8667 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8668 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8669 // BranchOnCount VPInstruction to the latch.
8670 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8671 bool HasNUW, bool IsVPlanNative) {
8672 Value *StartIdx = ConstantInt::get(IdxTy, 0);
8673 auto *StartV = Plan.getOrAddVPValue(StartIdx);
8674
8675 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8676 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8677 VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8678 if (IsVPlanNative)
8679 Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8680 Header->insert(CanonicalIVPHI, Header->begin());
8681
8682 auto *CanonicalIVIncrement =
8683 new VPInstruction(HasNUW ?
VPInstruction::CanonicalIVIncrementNUW
8684 : VPInstruction::CanonicalIVIncrement,
8685 {CanonicalIVPHI}, DL);
8686 CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8687
8688 VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8689 if (IsVPlanNative) {
8690 EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8691 EB->setCondBit(nullptr);
8692 }
8693 EB->appendRecipe(CanonicalIVIncrement);
8694
8695 auto *BranchOnCount =
8696 new VPInstruction(VPInstruction::BranchOnCount,
8697 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8698 EB->appendRecipe(BranchOnCount);
8699 }
8700
8701 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8702 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8703 const MapVector<Instruction *, Instruction *> &SinkAfter) {
8704
8705 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8706
8707 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8708
8709 // ---------------------------------------------------------------------------
8710 // Pre-construction: record ingredients whose recipes we'll need to further
8711 // process after constructing the initial VPlan.
8712 // ---------------------------------------------------------------------------
8713
8714 // Mark instructions we'll need to sink later and their targets as
8715 // ingredients whose recipe we'll need to record.
8716 for (auto &Entry : SinkAfter) {
8717 RecipeBuilder.recordRecipeOf(Entry.first);
8718 RecipeBuilder.recordRecipeOf(Entry.second);
8719 }
8720 for (auto &Reduction : CM.getInLoopReductionChains()) {
8721 PHINode *Phi = Reduction.first;
8722 RecurKind Kind =
8723 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8724 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8725
8726 RecipeBuilder.recordRecipeOf(Phi);
8727 for (auto &R : ReductionOperations) {
8728 RecipeBuilder.recordRecipeOf(R);
8729 // For min/max reductions, where we have a pair of icmp/select, we also
8730 // need to record the ICmp recipe, so it can be removed later.
8731 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8732 "Only min/max recurrences allowed for inloop reductions");
8733 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8734 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8735 }
8736 }
8737
8738 // For each interleave group which is relevant for this (possibly trimmed)
8739 // Range, add it to the set of groups to be later applied to the VPlan and add
8740 // placeholders for its members' Recipes which we'll be replacing with a
8741 // single VPInterleaveRecipe.
8742 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8743 auto applyIG = [IG, this](ElementCount VF) -> bool {
8744 return (VF.isVector() && // Query is illegal for VF == 1
8745 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8746 LoopVectorizationCostModel::CM_Interleave);
8747 };
8748 if (!getDecisionAndClampRange(applyIG, Range))
8749 continue;
8750 InterleaveGroups.insert(IG);
8751 for (unsigned i = 0; i < IG->getFactor(); i++)
8752 if (Instruction *Member = IG->getMember(i))
8753 RecipeBuilder.recordRecipeOf(Member);
8754 }
8755
8756 // ---------------------------------------------------------------------------
8757 // Build initial VPlan: Scan the body of the loop in a topological order to
8758 // visit each basic block after having visited its predecessor basic blocks.
8759 // ---------------------------------------------------------------------------
8760
8761 // Create initial VPlan skeleton, with separate header and latch blocks.
8762 VPBasicBlock *HeaderVPBB = new VPBasicBlock();
8763 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8764 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8765 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8766 auto Plan = std::make_unique<VPlan>(TopRegion);
8767
8768 Instruction *DLInst =
8769 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8770 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8771 DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8772 !CM.foldTailByMasking(), false);
8773
8774 // Scan the body of the loop in a topological order to visit each basic block
8775 // after having visited its predecessor basic blocks.
8776 LoopBlocksDFS DFS(OrigLoop);
8777 DFS.perform(LI);
8778
8779 VPBasicBlock *VPBB = HeaderVPBB;
8780 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8781 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8782 // Relevant instructions from basic block BB will be grouped into VPRecipe
8783 // ingredients and will fill a new VPBasicBlock.
8784 unsigned VPBBsForBB = 0;
8785 VPBB->setName(BB->getName());
8786 Builder.setInsertPoint(VPBB);
8787
8788 // Introduce each ingredient into VPlan.
8789 // TODO: Model and preserve debug intrinsics in VPlan.
8790 for (Instruction &I : BB->instructionsWithoutDebug()) {
8791 Instruction *Instr = &I;
8792
8793 // First filter out irrelevant instructions, to ensure no recipes are
8794 // built for them.
8795 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8796 continue;
8797
8798 SmallVector<VPValue *, 4> Operands;
8799 auto *Phi = dyn_cast<PHINode>(Instr);
8800 if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8801 Operands.push_back(Plan->getOrAddVPValue(
8802 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8803 } else {
8804 auto OpRange = Plan->mapToVPValues(Instr->operands());
8805 Operands = {OpRange.begin(), OpRange.end()};
8806 }
8807 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8808 Instr, Operands, Range, Plan)) {
8809 // If Instr can be simplified to an existing VPValue, use it.
8810 if (RecipeOrValue.is<VPValue *>()) {
8811 auto *VPV = RecipeOrValue.get<VPValue *>();
8812 Plan->addVPValue(Instr, VPV);
8813 // If the re-used value is a recipe, register the recipe for the
8814 // instruction, in case the recipe for Instr needs to be recorded.
8815 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8816 RecipeBuilder.setRecipe(Instr, R);
8817 continue;
8818 }
8819 // Otherwise, add the new recipe.
8820 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8821 for (auto *Def : Recipe->definedValues()) {
8822 auto *UV = Def->getUnderlyingValue();
8823 Plan->addVPValue(UV, Def);
8824 }
8825
8826 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8827 HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8828 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8829 // of the header block. That can happen for truncates of induction
8830 // variables. Those recipes are moved to the phi section of the header
8831 // block after applying SinkAfter, which relies on the original
8832 // position of the trunc.
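// E.g. a 'trunc i64 %iv to i32' appearing mid-block becomes a widened i32
// induction recipe that must eventually be placed with the other header
// phis.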
8833 assert(isa<TruncInst>(Instr)); 8834 InductionsToMove.push_back( 8835 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8836 } 8837 RecipeBuilder.setRecipe(Instr, Recipe); 8838 VPBB->appendRecipe(Recipe); 8839 continue; 8840 } 8841 8842 // Otherwise, if all widening options failed, Instruction is to be 8843 // replicated. This may create a successor for VPBB. 8844 VPBasicBlock *NextVPBB = 8845 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8846 if (NextVPBB != VPBB) { 8847 VPBB = NextVPBB; 8848 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8849 : ""); 8850 } 8851 } 8852 8853 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8854 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8855 } 8856 8857 // Fold the last, empty block into its predecessor. 8858 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8859 assert(VPBB && "expected to fold last (empty) block"); 8860 // After here, VPBB should not be used. 8861 VPBB = nullptr; 8862 8863 assert(isa<VPRegionBlock>(Plan->getEntry()) && 8864 !Plan->getEntry()->getEntryBasicBlock()->empty() && 8865 "entry block must be set to a VPRegionBlock having a non-empty entry " 8866 "VPBasicBlock"); 8867 RecipeBuilder.fixHeaderPhis(); 8868 8869 // --------------------------------------------------------------------------- 8870 // Transform initial VPlan: Apply previously taken decisions, in order, to 8871 // bring the VPlan to its final state. 8872 // --------------------------------------------------------------------------- 8873 8874 // Apply Sink-After legal constraints. 8875 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8876 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8877 if (Region && Region->isReplicator()) { 8878 assert(Region->getNumSuccessors() == 1 && 8879 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8880 assert(R->getParent()->size() == 1 && 8881 "A recipe in an original replicator region must be the only " 8882 "recipe in its block"); 8883 return Region; 8884 } 8885 return nullptr; 8886 }; 8887 for (auto &Entry : SinkAfter) { 8888 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8889 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8890 8891 auto *TargetRegion = GetReplicateRegion(Target); 8892 auto *SinkRegion = GetReplicateRegion(Sink); 8893 if (!SinkRegion) { 8894 // If the sink source is not a replicate region, sink the recipe directly. 8895 if (TargetRegion) { 8896 // The target is in a replication region, make sure to move Sink to 8897 // the block after it, not into the replication region itself. 8898 VPBasicBlock *NextBlock = 8899 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8900 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8901 } else 8902 Sink->moveAfter(Target); 8903 continue; 8904 } 8905 8906 // The sink source is in a replicate region. Unhook the region from the CFG. 8907 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8908 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8909 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8910 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8911 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8912 8913 if (TargetRegion) { 8914 // The target recipe is also in a replicate region, move the sink region 8915 // after the target region. 
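// That is, rewire the CFG to TargetRegion -> SinkRegion -> TargetRegion's
// old successor.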
8916 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8917 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8918 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8919 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8920 } else { 8921 // The sink source is in a replicate region, we need to move the whole 8922 // replicate region, which should only contain a single recipe in the 8923 // main block. 8924 auto *SplitBlock = 8925 Target->getParent()->splitAt(std::next(Target->getIterator())); 8926 8927 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8928 8929 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8930 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8931 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8932 } 8933 } 8934 8935 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8936 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8937 8938 // Now that sink-after is done, move induction recipes for optimized truncates 8939 // to the phi section of the header block. 8940 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8941 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8942 8943 // Adjust the recipes for any inloop reductions. 8944 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 8945 RecipeBuilder, Range.Start); 8946 8947 // Introduce a recipe to combine the incoming and previous values of a 8948 // first-order recurrence. 8949 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 8950 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8951 if (!RecurPhi) 8952 continue; 8953 8954 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8955 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8956 auto *Region = GetReplicateRegion(PrevRecipe); 8957 if (Region) 8958 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 8959 if (Region || PrevRecipe->isPhi()) 8960 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8961 else 8962 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8963 8964 auto *RecurSplice = cast<VPInstruction>( 8965 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8966 {RecurPhi, RecurPhi->getBackedgeValue()})); 8967 8968 RecurPhi->replaceAllUsesWith(RecurSplice); 8969 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8970 // all users. 8971 RecurSplice->setOperand(0, RecurPhi); 8972 } 8973 8974 // Interleave memory: for each Interleave Group we marked earlier as relevant 8975 // for this VPlan, replace the Recipes widening its memory instructions with a 8976 // single VPInterleaveRecipe at its insertion point. 
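// E.g. the accesses a[2*i] and a[2*i+1] form a factor-2 group that can be
// implemented as one wide load (or store) plus shuffles.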
8977 for (auto IG : InterleaveGroups) { 8978 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8979 RecipeBuilder.getRecipe(IG->getInsertPos())); 8980 SmallVector<VPValue *, 4> StoredValues; 8981 for (unsigned i = 0; i < IG->getFactor(); ++i) 8982 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 8983 auto *StoreR = 8984 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 8985 StoredValues.push_back(StoreR->getStoredValue()); 8986 } 8987 8988 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8989 Recipe->getMask()); 8990 VPIG->insertBefore(Recipe); 8991 unsigned J = 0; 8992 for (unsigned i = 0; i < IG->getFactor(); ++i) 8993 if (Instruction *Member = IG->getMember(i)) { 8994 if (!Member->getType()->isVoidTy()) { 8995 VPValue *OriginalV = Plan->getVPValue(Member); 8996 Plan->removeVPValueFor(Member); 8997 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8998 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8999 J++; 9000 } 9001 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9002 } 9003 } 9004 9005 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9006 // in ways that accessing values using original IR values is incorrect. 9007 Plan->disableValue2VPValue(); 9008 9009 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9010 VPlanTransforms::sinkScalarOperands(*Plan); 9011 VPlanTransforms::mergeReplicateRegions(*Plan); 9012 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9013 9014 std::string PlanName; 9015 raw_string_ostream RSO(PlanName); 9016 ElementCount VF = Range.Start; 9017 Plan->addVF(VF); 9018 RSO << "Initial VPlan for VF={" << VF; 9019 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9020 Plan->addVF(VF); 9021 RSO << "," << VF; 9022 } 9023 RSO << "},UF>=1"; 9024 RSO.flush(); 9025 Plan->setName(PlanName); 9026 9027 // Fold Exit block into its predecessor if possible. 9028 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9029 // VPBasicBlock as exit. 9030 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit()); 9031 9032 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9033 return Plan; 9034 } 9035 9036 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9037 // Outer loop handling: They may require CFG and instruction level 9038 // transformations before even evaluating whether vectorization is profitable. 9039 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9040 // the vectorization pipeline. 9041 assert(!OrigLoop->isInnermost()); 9042 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9043 9044 // Create new empty VPlan 9045 auto Plan = std::make_unique<VPlan>(); 9046 9047 // Build hierarchical CFG 9048 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9049 HCFGBuilder.buildHierarchicalCFG(); 9050 9051 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9052 VF *= 2) 9053 Plan->addVF(VF); 9054 9055 if (EnableVPlanPredication) { 9056 VPlanPredicator VPP(*Plan); 9057 VPP.predicate(); 9058 9059 // Avoid running transformation to recipes until masked code generation in 9060 // VPlan-native path is in place. 
9061 return Plan;
9062 }
9063
9064 SmallPtrSet<Instruction *, 1> DeadInstructions;
9065 VPlanTransforms::VPInstructionsToVPRecipes(
9066 OrigLoop, Plan,
9067 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9068 DeadInstructions, *PSE.getSE());
9069
9070 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9071 true, true);
9072 return Plan;
9073 }
9074
9075 // Adjust the recipes for reductions. For in-loop reductions the chain of
9076 // instructions leading from the loop exit instr to the phi needs to be
9077 // converted to reductions, with one operand being vector and the other being
9078 // the scalar reduction chain. For other reductions, a select is introduced
9079 // between the phi and live-out recipes when folding the tail.
9080 void LoopVectorizationPlanner::adjustRecipesForReductions(
9081 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9082 ElementCount MinVF) {
9083 for (auto &Reduction : CM.getInLoopReductionChains()) {
9084 PHINode *Phi = Reduction.first;
9085 const RecurrenceDescriptor &RdxDesc =
9086 Legal->getReductionVars().find(Phi)->second;
9087 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9088
9089 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9090 continue;
9091
9092 // ReductionOperations are ordered top-down from the phi's use to the
9093 // LoopExitValue. We keep track of the previous item (the Chain) to tell
9094 // which of the two operands will remain scalar and which will be reduced.
9095 // For minmax the chain will be the select instructions.
9096 Instruction *Chain = Phi;
9097 for (Instruction *R : ReductionOperations) {
9098 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9099 RecurKind Kind = RdxDesc.getRecurrenceKind();
9100
9101 VPValue *ChainOp = Plan->getVPValue(Chain);
9102 unsigned FirstOpId;
9103 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9104 "Only min/max recurrences allowed for inloop reductions");
9105 // Recognize a call to the llvm.fmuladd intrinsic.
9106 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9107 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9108 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9109 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9110 assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9111 "Expected to replace a VPWidenSelectSC");
9112 FirstOpId = 1;
9113 } else {
9114 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9115 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9116 "Expected to replace a VPWidenSC");
9117 FirstOpId = 0;
9118 }
9119 unsigned VecOpId =
9120 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9121 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9122
9123 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9124 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9125 : nullptr;
9126
9127 if (IsFMulAdd) {
9128 // If the instruction is a call to the llvm.fmuladd intrinsic then we
9129 // need to create an fmul recipe to use as the vector operand for the
9130 // fadd reduction.
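// I.e. fmuladd(A, B, Acc) is decomposed into Mul = fmul A, B here, with
// the fadd of Mul into Acc performed by the reduction recipe created below.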
9131 VPInstruction *FMulRecipe = new VPInstruction(
9132 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9133 FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9134 WidenRecipe->getParent()->insert(FMulRecipe,
9135 WidenRecipe->getIterator());
9136 VecOp = FMulRecipe;
9137 }
9138 VPReductionRecipe *RedRecipe =
9139 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9140 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9141 Plan->removeVPValueFor(R);
9142 Plan->addVPValue(R, RedRecipe);
9143 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9145 WidenRecipe->eraseFromParent();
9146
9147 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9148 VPRecipeBase *CompareRecipe =
9149 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9150 assert(isa<VPWidenRecipe>(CompareRecipe) &&
9151 "Expected to replace a VPWidenSC");
9152 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9153 "Expected no remaining users");
9154 CompareRecipe->eraseFromParent();
9155 }
9156 Chain = R;
9157 }
9158 }
9159
9160 // If the tail is folded by masking, introduce selects between the phi
9161 // and the live-out instruction of each reduction, at the beginning of the
9162 // dedicated latch block.
9163 if (CM.foldTailByMasking()) {
9164 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9165 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9166 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9167 if (!PhiR || PhiR->isInLoop())
9168 continue;
9169 VPValue *Cond =
9170 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9171 VPValue *Red = PhiR->getBackedgeValue();
9172 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9173 "reduction recipe must be defined before latch");
9174 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9175 }
9176 }
9177 }
9178
9179 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9180 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9181 VPSlotTracker &SlotTracker) const {
9182 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9183 IG->getInsertPos()->printAsOperand(O, false);
9184 O << ", ";
9185 getAddr()->printAsOperand(O, SlotTracker);
9186 VPValue *Mask = getMask();
9187 if (Mask) {
9188 O << ", ";
9189 Mask->printAsOperand(O, SlotTracker);
9190 }
9191
9192 unsigned OpIdx = 0;
9193 for (unsigned i = 0; i < IG->getFactor(); ++i) {
9194 if (!IG->getMember(i))
9195 continue;
9196 if (getNumStoreOperands() > 0) {
9197 O << "\n" << Indent << " store ";
9198 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9199 O << " to index " << i;
9200 } else {
9201 O << "\n" << Indent << " ";
9202 getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9203 O << " = load from index " << i;
9204 }
9205 ++OpIdx;
9206 }
9207 }
9208 #endif
9209
9210 void VPWidenCallRecipe::execute(VPTransformState &State) {
9211 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9212 *this, State);
9213 }
9214
9215 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9216 auto &I = *cast<SelectInst>(getUnderlyingInstr());
9217 State.ILV->setDebugLocFromInst(&I);
9218
9219 // The condition can be loop invariant but still defined inside the
9220 // loop. This means that we can't just use the original 'cond' value.
9221 // We have to take the 'vectorized' value and pick the first lane.
9222 // Instcombine will make this a no-op.
9223 auto *InvarCond =
9224 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9225
9226 for (unsigned Part = 0; Part < State.UF; ++Part) {
9227 Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9228 Value *Op0 = State.get(getOperand(1), Part);
9229 Value *Op1 = State.get(getOperand(2), Part);
9230 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9231 State.set(this, Sel, Part);
9232 State.ILV->addMetadata(Sel, &I);
9233 }
9234 }
9235
9236 void VPWidenRecipe::execute(VPTransformState &State) {
9237 auto &I = *cast<Instruction>(getUnderlyingValue());
9238 auto &Builder = State.Builder;
9239 switch (I.getOpcode()) {
9240 case Instruction::Call:
9241 case Instruction::Br:
9242 case Instruction::PHI:
9243 case Instruction::GetElementPtr:
9244 case Instruction::Select:
9245 llvm_unreachable("This instruction is handled by a different recipe.");
9246 case Instruction::UDiv:
9247 case Instruction::SDiv:
9248 case Instruction::SRem:
9249 case Instruction::URem:
9250 case Instruction::Add:
9251 case Instruction::FAdd:
9252 case Instruction::Sub:
9253 case Instruction::FSub:
9254 case Instruction::FNeg:
9255 case Instruction::Mul:
9256 case Instruction::FMul:
9257 case Instruction::FDiv:
9258 case Instruction::FRem:
9259 case Instruction::Shl:
9260 case Instruction::LShr:
9261 case Instruction::AShr:
9262 case Instruction::And:
9263 case Instruction::Or:
9264 case Instruction::Xor: {
9265 // Just widen unops and binops.
9266 State.ILV->setDebugLocFromInst(&I);
9267
9268 for (unsigned Part = 0; Part < State.UF; ++Part) {
9269 SmallVector<Value *, 2> Ops;
9270 for (VPValue *VPOp : operands())
9271 Ops.push_back(State.get(VPOp, Part));
9272
9273 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9274
9275 if (auto *VecOp = dyn_cast<Instruction>(V)) {
9276 VecOp->copyIRFlags(&I);
9277
9278 // If the instruction is vectorized and was in a basic block that needed
9279 // predication, we can't propagate poison-generating flags (nuw/nsw,
9280 // exact, etc.). The control flow has been linearized and the
9281 // instruction is no longer guarded by the predicate, so the flag
9282 // properties may no longer hold.
9283 if (State.MayGeneratePoisonRecipes.contains(this))
9284 VecOp->dropPoisonGeneratingFlags();
9285 }
9286
9287 // Use this vector value for all users of the original instruction.
9288 State.set(this, V, Part);
9289 State.ILV->addMetadata(V, &I);
9290 }
9291
9292 break;
9293 }
9294 case Instruction::ICmp:
9295 case Instruction::FCmp: {
9296 // Widen compares. Generate vector compares.
9297 bool FCmp = (I.getOpcode() == Instruction::FCmp);
9298 auto *Cmp = cast<CmpInst>(&I);
9299 State.ILV->setDebugLocFromInst(Cmp);
9300 for (unsigned Part = 0; Part < State.UF; ++Part) {
9301 Value *A = State.get(getOperand(0), Part);
9302 Value *B = State.get(getOperand(1), Part);
9303 Value *C = nullptr;
9304 if (FCmp) {
9305 // Propagate fast math flags.
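// The RAII guard restores the builder's previous fast-math flags at the end
// of this scope.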
9306 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9307 Builder.setFastMathFlags(Cmp->getFastMathFlags());
9308 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9309 } else {
9310 C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9311 }
9312 State.set(this, C, Part);
9313 State.ILV->addMetadata(C, &I);
9314 }
9315
9316 break;
9317 }
9318
9319 case Instruction::ZExt:
9320 case Instruction::SExt:
9321 case Instruction::FPToUI:
9322 case Instruction::FPToSI:
9323 case Instruction::FPExt:
9324 case Instruction::PtrToInt:
9325 case Instruction::IntToPtr:
9326 case Instruction::SIToFP:
9327 case Instruction::UIToFP:
9328 case Instruction::Trunc:
9329 case Instruction::FPTrunc:
9330 case Instruction::BitCast: {
9331 auto *CI = cast<CastInst>(&I);
9332 State.ILV->setDebugLocFromInst(CI);
9333
9334 // Vectorize casts.
9335 Type *DestTy = (State.VF.isScalar())
9336 ? CI->getType()
9337 : VectorType::get(CI->getType(), State.VF);
9338
9339 for (unsigned Part = 0; Part < State.UF; ++Part) {
9340 Value *A = State.get(getOperand(0), Part);
9341 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9342 State.set(this, Cast, Part);
9343 State.ILV->addMetadata(Cast, &I);
9344 }
9345 break;
9346 }
9347 default:
9348 // This instruction is not vectorized by simple widening.
9349 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9350 llvm_unreachable("Unhandled instruction!");
9351 } // end of switch.
9352 }
9353
9354 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9355 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9356 // Construct a vector GEP by widening the operands of the scalar GEP as
9357 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9358 // results in a vector of pointers when at least one operand of the GEP
9359 // is vector-typed. Thus, to keep the representation compact, we only use
9360 // vector-typed operands for loop-varying values.
9361
9362 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9363 // If we are vectorizing, but the GEP has only loop-invariant operands,
9364 // the GEP we build (by only using vector-typed operands for
9365 // loop-varying values) would be a scalar pointer. Thus, to ensure we
9366 // produce a vector of pointers, we need to either arbitrarily pick an
9367 // operand to broadcast, or broadcast a clone of the original GEP.
9368 // Here, we broadcast a clone of the original.
9369 //
9370 // TODO: If at some point we decide to scalarize instructions having
9371 // loop-invariant operands, this special case will no longer be
9372 // required. We would add the scalarization decision to
9373 // collectLoopScalars() and teach getVectorValue() to broadcast
9374 // the lane-zero scalar value.
9375 auto *Clone = State.Builder.Insert(GEP->clone());
9376 for (unsigned Part = 0; Part < State.UF; ++Part) {
9377 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9378 State.set(this, EntryPart, Part);
9379 State.ILV->addMetadata(EntryPart, GEP);
9380 }
9381 } else {
9382 // If the GEP has at least one loop-varying operand, we are sure to
9383 // produce a vector of pointers. But if we are only unrolling, we want
9384 // to produce a scalar GEP for each unroll part. Thus, the GEP we
9385 // produce with the code below will be scalar (if VF == 1) or vector
9386 // (otherwise). Note that for the unroll-only case, we still maintain
9387 // values in the vector mapping with initVector, as we do for other
9388 // instructions.
9389 for (unsigned Part = 0; Part < State.UF; ++Part) {
9390 // The pointer operand of the new GEP. If it's loop-invariant, we
9391 // won't broadcast it.
9392 auto *Ptr = IsPtrLoopInvariant
9393 ? State.get(getOperand(0), VPIteration(0, 0))
9394 : State.get(getOperand(0), Part);
9395
9396 // Collect all the indices for the new GEP. If any index is
9397 // loop-invariant, we won't broadcast it.
9398 SmallVector<Value *, 4> Indices;
9399 for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9400 VPValue *Operand = getOperand(I);
9401 if (IsIndexLoopInvariant[I - 1])
9402 Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9403 else
9404 Indices.push_back(State.get(Operand, Part));
9405 }
9406
9407 // If the GEP instruction is vectorized and was in a basic block that
9408 // needed predication, we can't propagate the poison-generating 'inbounds'
9409 // flag. The control flow has been linearized and the GEP is no longer
9410 // guarded by the predicate, so the 'inbounds' property may no longer
9411 // hold.
9412 bool IsInBounds =
9413 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9414
9415 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9416 // but it should be a vector, otherwise.
9417 auto *NewGEP = IsInBounds
9418 ? State.Builder.CreateInBoundsGEP(
9419 GEP->getSourceElementType(), Ptr, Indices)
9420 : State.Builder.CreateGEP(GEP->getSourceElementType(),
9421 Ptr, Indices);
9422 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9423 "NewGEP is not a pointer vector");
9424 State.set(this, NewGEP, Part);
9425 State.ILV->addMetadata(NewGEP, GEP);
9426 }
9427 }
9428 }
9429
9430 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9431 assert(!State.Instance && "Int or FP induction being replicated.");
9432
9433 Value *Start = getStartValue()->getLiveInIRValue();
9434 const InductionDescriptor &ID = getInductionDescriptor();
9435 TruncInst *Trunc = getTruncInst();
9436 IRBuilderBase &Builder = State.Builder;
9437 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9438 assert(State.VF.isVector() && "must have vector VF");
9439
9440 // The value from the original loop to which we are mapping the new induction
9441 // variable.
9442 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9443
9444 auto &DL = EntryVal->getModule()->getDataLayout();
9445
9446 // Generate code for the induction step. Note that induction steps are
9447 // required to be loop-invariant.
9448 auto CreateStepValue = [&](const SCEV *Step) -> Value * {
9449 if (SE.isSCEVable(IV->getType())) {
9450 SCEVExpander Exp(SE, DL, "induction");
9451 return Exp.expandCodeFor(Step, Step->getType(),
9452 State.CFG.VectorPreHeader->getTerminator());
9453 }
9454 return cast<SCEVUnknown>(Step)->getValue();
9455 };
9456
9457 // Fast-math-flags propagate from the original induction instruction.
9458 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9459 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9460 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9461
9462 // Now do the actual transformations, and start with creating the step value.
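// For an induction with start S and step D at VF = 4, the code below
// produces the vector <S, S+D, S+2*D, S+3*D> and advances it by 4*D per
// part each vector iteration.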
9463 Value *Step = CreateStepValue(ID.getStep()); 9464 9465 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9466 "Expected either an induction phi-node or a truncate of it!"); 9467 9468 // Construct the initial value of the vector IV in the vector loop preheader 9469 auto CurrIP = Builder.saveIP(); 9470 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 9471 if (isa<TruncInst>(EntryVal)) { 9472 assert(Start->getType()->isIntegerTy() && 9473 "Truncation requires an integer type"); 9474 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9475 Step = Builder.CreateTrunc(Step, TruncType); 9476 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9477 } 9478 9479 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9480 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9481 Value *SteppedStart = getStepVector( 9482 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9483 9484 // We create vector phi nodes for both integer and floating-point induction 9485 // variables. Here, we determine the kind of arithmetic we will perform. 9486 Instruction::BinaryOps AddOp; 9487 Instruction::BinaryOps MulOp; 9488 if (Step->getType()->isIntegerTy()) { 9489 AddOp = Instruction::Add; 9490 MulOp = Instruction::Mul; 9491 } else { 9492 AddOp = ID.getInductionOpcode(); 9493 MulOp = Instruction::FMul; 9494 } 9495 9496 // Multiply the vectorization factor by the step using integer or 9497 // floating-point arithmetic as appropriate. 9498 Type *StepType = Step->getType(); 9499 Value *RuntimeVF; 9500 if (Step->getType()->isFloatingPointTy()) 9501 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9502 else 9503 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9504 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9505 9506 // Create a vector splat to use in the induction update. 9507 // 9508 // FIXME: If the step is non-constant, we create the vector splat with 9509 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9510 // handle a constant vector splat. 9511 Value *SplatVF = isa<Constant>(Mul) 9512 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9513 : Builder.CreateVectorSplat(State.VF, Mul); 9514 Builder.restoreIP(CurrIP); 9515 9516 // We may need to add the step a number of times, depending on the unroll 9517 // factor. The last of those goes into the PHI. 9518 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9519 &*State.CFG.PrevBB->getFirstInsertionPt()); 9520 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9521 Instruction *LastInduction = VecInd; 9522 for (unsigned Part = 0; Part < State.UF; ++Part) { 9523 State.set(this, LastInduction, Part); 9524 9525 if (isa<TruncInst>(EntryVal)) 9526 State.ILV->addMetadata(LastInduction, EntryVal); 9527 9528 LastInduction = cast<Instruction>( 9529 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9530 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9531 } 9532 9533 // Move the last step to the end of the latch block. This ensures consistent 9534 // placement of all induction updates. 
9535 auto *LoopVectorLatch = 9536 State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 9537 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 9538 LastInduction->moveBefore(Br); 9539 LastInduction->setName("vec.ind.next"); 9540 9541 VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader); 9542 VecInd->addIncoming(LastInduction, LoopVectorLatch); 9543 } 9544 9545 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9546 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9547 "Not a pointer induction according to InductionDescriptor!"); 9548 9549 PHINode *Phi = cast<PHINode>(getUnderlyingInstr()); 9550 assert(Phi->getType()->isPointerTy() && "Unexpected type."); 9551 9552 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9553 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9554 9555 if (all_of(users(), [this](const VPUser *U) { 9556 return cast<VPRecipeBase>(U)->usesScalars(this); 9557 })) { 9558 // This is the normalized GEP that starts counting at zero. 9559 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9560 CanonicalIV, IndDesc.getStep()->getType()); 9561 // Determine the number of scalars we need to generate for each unroll 9562 // iteration. If the instruction is uniform, we only need to generate the 9563 // first lane. Otherwise, we generate all VF values. 9564 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9565 assert((IsUniform || !State.VF.isScalable()) && 9566 "Cannot scalarize a scalable VF"); 9567 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9568 9569 for (unsigned Part = 0; Part < State.UF; ++Part) { 9570 Value *PartStart = 9571 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9572 9573 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9574 Value *Idx = State.Builder.CreateAdd( 9575 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9576 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9577 9578 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9579 State.CFG.PrevBB->getTerminator()); 9580 Value *SclrGep = emitTransformedIndex( 9581 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9582 SclrGep->setName("next.gep"); 9583 State.set(this, SclrGep, VPIteration(Part, Lane)); 9584 } 9585 } 9586 return; 9587 } 9588 9589 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9590 "Induction step not a SCEV constant!"); 9591 Type *PhiType = IndDesc.getStep()->getType(); 9592 9593 // Build a pointer phi 9594 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9595 Type *ScStValueType = ScalarStartValue->getType(); 9596 PHINode *NewPointerPhi = 9597 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9598 NewPointerPhi->addIncoming(ScalarStartValue, State.CFG.VectorPreHeader); 9599 9600 // A pointer induction, performed by using a gep 9601 BasicBlock *LoopLatch = 9602 State.LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 9603 9604 const DataLayout &DL = LoopLatch->getModule()->getDataLayout(); 9605 Instruction *InductionLoc = LoopLatch->getTerminator(); 9606 const SCEV *ScalarStep = IndDesc.getStep(); 9607 SCEVExpander Exp(SE, DL, "induction"); 9608 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9609 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9610 Value *NumUnrolledElems = 9611 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9612 Value *InductionGEP = GetElementPtrInst::Create( 9613 IndDesc.getElementType(), NewPointerPhi, 9614 
State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9615 InductionLoc); 9616 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 9617 9618 // Create UF many actual address geps that use the pointer 9619 // phi as base and a vectorized version of the step value 9620 // (<step*0, ..., step*N>) as offset. 9621 for (unsigned Part = 0; Part < State.UF; ++Part) { 9622 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9623 Value *StartOffsetScalar = 9624 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9625 Value *StartOffset = 9626 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9627 // Create a vector of consecutive numbers from zero to VF. 9628 StartOffset = State.Builder.CreateAdd( 9629 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9630 9631 Value *GEP = State.Builder.CreateGEP( 9632 IndDesc.getElementType(), NewPointerPhi, 9633 State.Builder.CreateMul( 9634 StartOffset, 9635 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9636 "vector.gep")); 9637 State.set(this, GEP, Part); 9638 } 9639 } 9640 9641 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9642 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9643 9644 // Fast-math-flags propagate from the original induction instruction. 9645 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9646 if (IndDesc.getInductionBinOp() && 9647 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9648 State.Builder.setFastMathFlags( 9649 IndDesc.getInductionBinOp()->getFastMathFlags()); 9650 9651 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9652 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9653 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9654 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9655 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9656 ScalarIV = 9657 Ty->isIntegerTy() 9658 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9659 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9660 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9661 getStartValue()->getLiveInIRValue(), Step, 9662 IndDesc); 9663 ScalarIV->setName("offset.idx"); 9664 } 9665 if (TruncToTy) { 9666 assert(Step->getType()->isIntegerTy() && 9667 "Truncation requires an integer step"); 9668 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9669 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9670 } 9671 return ScalarIV; 9672 }; 9673 9674 Value *ScalarIV = CreateScalarIV(Step); 9675 if (State.VF.isVector()) { 9676 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9677 return; 9678 } 9679 9680 for (unsigned Part = 0; Part < State.UF; ++Part) { 9681 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9682 Value *EntryPart; 9683 if (Step->getType()->isFloatingPointTy()) { 9684 Value *StartIdx = 9685 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9686 // Floating-point operations inherit FMF via the builder's flags. 
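// In other words, for unroll part Part the loop below computes, as a sketch of
// the intent rather than literal IR:
//   EntryPart = ScalarIV <op> (Part * VF) * Step
// where <op> is an integer add, or the induction's fadd/fsub opcode for
// floating-point inductions.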
9687       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9688       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9689                                             ScalarIV, MulOp);
9690     } else {
9691       Value *StartIdx =
9692           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9693       EntryPart = State.Builder.CreateAdd(
9694           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9695     }
9696     State.set(this, EntryPart, Part);
9697   }
9698 }
9699
9700 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9701   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9702                                  State);
9703 }
9704
9705 void VPBlendRecipe::execute(VPTransformState &State) {
9706   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9707   // We know that all PHIs in non-header blocks are converted into
9708   // selects, so we don't have to worry about the insertion order and we
9709   // can just use the builder.
9710   // At this point we generate the predication tree. There may be
9711   // duplications since this is a simple recursive scan, but future
9712   // optimizations will clean it up.
9713
9714   unsigned NumIncoming = getNumIncomingValues();
9715
9716   // Generate a sequence of selects of the form:
9717   // SELECT(Mask3, In3,
9718   //        SELECT(Mask2, In2,
9719   //               SELECT(Mask1, In1,
9720   //                      In0)))
9721   // Note that Mask0 is never used: lanes for which no path reaches this phi
9722   // are essentially undef and are taken from In0.
9723   InnerLoopVectorizer::VectorParts Entry(State.UF);
9724   for (unsigned In = 0; In < NumIncoming; ++In) {
9725     for (unsigned Part = 0; Part < State.UF; ++Part) {
9726       // We might have single edge PHIs (blocks) - use an identity
9727       // 'select' for the first PHI operand.
9728       Value *In0 = State.get(getIncomingValue(In), Part);
9729       if (In == 0)
9730         Entry[Part] = In0; // Initialize with the first incoming value.
9731       else {
9732         // Select between the current value and the previous incoming edge
9733         // based on the incoming mask.
9734         Value *Cond = State.get(getMask(In), Part);
9735         Entry[Part] =
9736             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9737       }
9738     }
9739   }
9740   for (unsigned Part = 0; Part < State.UF; ++Part)
9741     State.set(this, Entry[Part], Part);
9742 }
9743
9744 void VPInterleaveRecipe::execute(VPTransformState &State) {
9745   assert(!State.Instance && "Interleave group being replicated.");
9746   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9747                                       getStoredValues(), getMask());
9748 }
9749
9750 void VPReductionRecipe::execute(VPTransformState &State) {
9751   assert(!State.Instance && "Reduction being replicated.");
9752   Value *PrevInChain = State.get(getChainOp(), 0);
9753   RecurKind Kind = RdxDesc->getRecurrenceKind();
9754   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9755   // Propagate the fast-math flags carried by the underlying instruction.
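// For a masked, unordered i32 add-reduction, the per-part lowering below is
// roughly (an illustrative sketch assuming VF=4; the value names are
// invented):
//   %sel  = select <4 x i1> %cond, <4 x i32> %vec.op, <4 x i32> zeroinitializer
//   %rdx  = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sel)
//   %next = add i32 %rdx, %prev.in.chain
// with zeroinitializer being the recurrence identity for the add kind.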
9756   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9757   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9758   for (unsigned Part = 0; Part < State.UF; ++Part) {
9759     Value *NewVecOp = State.get(getVecOp(), Part);
9760     if (VPValue *Cond = getCondOp()) {
9761       Value *NewCond = State.get(Cond, Part);
9762       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9763       Value *Iden = RdxDesc->getRecurrenceIdentity(
9764           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9765       Value *IdenVec =
9766           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9767       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9768       NewVecOp = Select;
9769     }
9770     Value *NewRed;
9771     Value *NextInChain;
9772     if (IsOrdered) {
9773       if (State.VF.isVector())
9774         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9775                                         PrevInChain);
9776       else
9777         NewRed = State.Builder.CreateBinOp(
9778             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9779             NewVecOp);
9780       PrevInChain = NewRed;
9781     } else {
9782       PrevInChain = State.get(getChainOp(), Part);
9783       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9784     }
9785     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9786       NextInChain =
9787           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9788                          NewRed, PrevInChain);
9789     } else if (IsOrdered)
9790       NextInChain = NewRed;
9791     else
9792       NextInChain = State.Builder.CreateBinOp(
9793           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9794           PrevInChain);
9795     State.set(this, NextInChain, Part);
9796   }
9797 }
9798
9799 void VPReplicateRecipe::execute(VPTransformState &State) {
9800   if (State.Instance) { // Generate a single instance.
9801     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9802     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9803                                     IsPredicated, State);
9804     // Insert the scalar instance, packing it into a vector.
9805     if (AlsoPack && State.VF.isVector()) {
9806       // If we're constructing lane 0, initialize to start from poison.
9807       if (State.Instance->Lane.isFirstLane()) {
9808         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9809         Value *Poison = PoisonValue::get(
9810             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9811         State.set(this, Poison, State.Instance->Part);
9812       }
9813       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9814     }
9815     return;
9816   }
9817
9818   // Generate scalar instances for all VF lanes of all UF parts, unless the
9819   // instruction is uniform, in which case generate only the first lane for
9820   // each of the UF parts.
9821   unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 9822 assert((!State.VF.isScalable() || IsUniform) && 9823 "Can't scalarize a scalable vector"); 9824 for (unsigned Part = 0; Part < State.UF; ++Part) 9825 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9826 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9827 VPIteration(Part, Lane), IsPredicated, 9828 State); 9829 } 9830 9831 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9832 assert(State.Instance && "Branch on Mask works only on single instance."); 9833 9834 unsigned Part = State.Instance->Part; 9835 unsigned Lane = State.Instance->Lane.getKnownLane(); 9836 9837 Value *ConditionBit = nullptr; 9838 VPValue *BlockInMask = getMask(); 9839 if (BlockInMask) { 9840 ConditionBit = State.get(BlockInMask, Part); 9841 if (ConditionBit->getType()->isVectorTy()) 9842 ConditionBit = State.Builder.CreateExtractElement( 9843 ConditionBit, State.Builder.getInt32(Lane)); 9844 } else // Block in mask is all-one. 9845 ConditionBit = State.Builder.getTrue(); 9846 9847 // Replace the temporary unreachable terminator with a new conditional branch, 9848 // whose two destinations will be set later when they are created. 9849 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9850 assert(isa<UnreachableInst>(CurrentTerminator) && 9851 "Expected to replace unreachable terminator with conditional branch."); 9852 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9853 CondBr->setSuccessor(0, nullptr); 9854 ReplaceInstWithInst(CurrentTerminator, CondBr); 9855 } 9856 9857 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9858 assert(State.Instance && "Predicated instruction PHI works per instance."); 9859 Instruction *ScalarPredInst = 9860 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9861 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9862 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9863 assert(PredicatingBB && "Predicated block has no single predecessor."); 9864 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9865 "operand must be VPReplicateRecipe"); 9866 9867 // By current pack/unpack logic we need to generate only a single phi node: if 9868 // a vector value for the predicated instruction exists at this point it means 9869 // the instruction has vector users only, and a phi for the vector value is 9870 // needed. In this case the recipe of the predicated instruction is marked to 9871 // also do that packing, thereby "hoisting" the insert-element sequence. 9872 // Otherwise, a phi node for the scalar value is needed. 9873 unsigned Part = State.Instance->Part; 9874 if (State.hasVectorValue(getOperand(0), Part)) { 9875 Value *VectorValue = State.get(getOperand(0), Part); 9876 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9877 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9878 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9879 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9880 if (State.hasVectorValue(this, Part)) 9881 State.reset(this, VPhi, Part); 9882 else 9883 State.set(this, VPhi, Part); 9884 // NOTE: Currently we need to update the value of the operand, so the next 9885 // predicated iteration inserts its generated value in the correct vector. 
9886     State.reset(getOperand(0), VPhi, Part);
9887   } else {
9888     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9889     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9890     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9891                      PredicatingBB);
9892     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9893     if (State.hasScalarValue(this, *State.Instance))
9894       State.reset(this, Phi, *State.Instance);
9895     else
9896       State.set(this, Phi, *State.Instance);
9897     // NOTE: Currently we need to update the value of the operand, so the next
9898     // predicated iteration inserts its generated value in the correct vector.
9899     State.reset(getOperand(0), Phi, *State.Instance);
9900   }
9901 }
9902
9903 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9904   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9905
9906   // Identify the underlying load or store instruction.
9907   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9908   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9909
9910   assert((LI || SI) && "Invalid Load/Store instruction");
9911   assert((!SI || StoredValue) && "No stored value provided for widened store");
9912   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9913
9914   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9915
9916   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9917   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9918   bool CreateGatherScatter = !Consecutive;
9919
9920   auto &Builder = State.Builder;
9921   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9922   bool isMaskRequired = getMask();
9923   if (isMaskRequired)
9924     for (unsigned Part = 0; Part < State.UF; ++Part)
9925       BlockInMaskParts[Part] = State.get(getMask(), Part);
9926
9927   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9928     // Calculate the pointer for the specific unroll-part.
9929     GetElementPtrInst *PartPtr = nullptr;
9930
9931     bool InBounds = false;
9932     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9933       InBounds = gep->isInBounds();
9934     if (Reverse) {
9935       // If the address is consecutive but reversed, then the
9936       // wide store needs to start at the last vector element.
9937       // RunTimeVF = VScale * VF.getKnownMinValue()
9938       // For fixed-width vectors, VScale is 1, so RunTimeVF = VF.getKnownMinValue()
9939       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9940       // NumElt = -Part * RunTimeVF
9941       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9942       // LastLane = 1 - RunTimeVF
9943       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9944       PartPtr =
9945           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9946       PartPtr->setIsInBounds(InBounds);
9947       PartPtr = cast<GetElementPtrInst>(
9948           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9949       PartPtr->setIsInBounds(InBounds);
9950       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9951 BlockInMaskParts[Part] = 9952 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9953 } else { 9954 Value *Increment = 9955 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9956 PartPtr = cast<GetElementPtrInst>( 9957 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9958 PartPtr->setIsInBounds(InBounds); 9959 } 9960 9961 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9962 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9963 }; 9964 9965 // Handle Stores: 9966 if (SI) { 9967 State.ILV->setDebugLocFromInst(SI); 9968 9969 for (unsigned Part = 0; Part < State.UF; ++Part) { 9970 Instruction *NewSI = nullptr; 9971 Value *StoredVal = State.get(StoredValue, Part); 9972 if (CreateGatherScatter) { 9973 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9974 Value *VectorGep = State.get(getAddr(), Part); 9975 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9976 MaskPart); 9977 } else { 9978 if (Reverse) { 9979 // If we store to reverse consecutive memory locations, then we need 9980 // to reverse the order of elements in the stored value. 9981 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 9982 // We don't want to update the value in the map as it might be used in 9983 // another expression. So don't call resetVectorValue(StoredVal). 9984 } 9985 auto *VecPtr = 9986 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 9987 if (isMaskRequired) 9988 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 9989 BlockInMaskParts[Part]); 9990 else 9991 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 9992 } 9993 State.ILV->addMetadata(NewSI, SI); 9994 } 9995 return; 9996 } 9997 9998 // Handle loads. 9999 assert(LI && "Must have a load instruction"); 10000 State.ILV->setDebugLocFromInst(LI); 10001 for (unsigned Part = 0; Part < State.UF; ++Part) { 10002 Value *NewLI; 10003 if (CreateGatherScatter) { 10004 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10005 Value *VectorGep = State.get(getAddr(), Part); 10006 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10007 nullptr, "wide.masked.gather"); 10008 State.ILV->addMetadata(NewLI, LI); 10009 } else { 10010 auto *VecPtr = 10011 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10012 if (isMaskRequired) 10013 NewLI = Builder.CreateMaskedLoad( 10014 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10015 PoisonValue::get(DataTy), "wide.masked.load"); 10016 else 10017 NewLI = 10018 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10019 10020 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10021 State.ILV->addMetadata(NewLI, LI); 10022 if (Reverse) 10023 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10024 } 10025 10026 State.set(this, NewLI, Part); 10027 } 10028 } 10029 10030 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10031 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10032 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10033 // for predication. 
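// For example (assuming the cl::opt spelling used where the flag is defined
// earlier in this file), -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize
// requests a tail-folded loop body via CM_ScalarEpilogueNotAllowedUsePredicate
// instead of a scalar remainder loop.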
10034 static ScalarEpilogueLowering getScalarEpilogueLowering( 10035 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10036 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10037 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10038 LoopVectorizationLegality &LVL) { 10039 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10040 // don't look at hints or options, and don't request a scalar epilogue. 10041 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10042 // LoopAccessInfo (due to code dependency and not being able to reliably get 10043 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10044 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10045 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10046 // back to the old way and vectorize with versioning when forced. See D81345.) 10047 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10048 PGSOQueryType::IRPass) && 10049 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10050 return CM_ScalarEpilogueNotAllowedOptSize; 10051 10052 // 2) If set, obey the directives 10053 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10054 switch (PreferPredicateOverEpilogue) { 10055 case PreferPredicateTy::ScalarEpilogue: 10056 return CM_ScalarEpilogueAllowed; 10057 case PreferPredicateTy::PredicateElseScalarEpilogue: 10058 return CM_ScalarEpilogueNotNeededUsePredicate; 10059 case PreferPredicateTy::PredicateOrDontVectorize: 10060 return CM_ScalarEpilogueNotAllowedUsePredicate; 10061 }; 10062 } 10063 10064 // 3) If set, obey the hints 10065 switch (Hints.getPredicate()) { 10066 case LoopVectorizeHints::FK_Enabled: 10067 return CM_ScalarEpilogueNotNeededUsePredicate; 10068 case LoopVectorizeHints::FK_Disabled: 10069 return CM_ScalarEpilogueAllowed; 10070 }; 10071 10072 // 4) if the TTI hook indicates this is profitable, request predication. 10073 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10074 LVL.getLAI())) 10075 return CM_ScalarEpilogueNotNeededUsePredicate; 10076 10077 return CM_ScalarEpilogueAllowed; 10078 } 10079 10080 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10081 // If Values have been set for this Def return the one relevant for \p Part. 10082 if (hasVectorValue(Def, Part)) 10083 return Data.PerPartOutput[Def][Part]; 10084 10085 if (!hasScalarValue(Def, {Part, 0})) { 10086 Value *IRV = Def->getLiveInIRValue(); 10087 Value *B = ILV->getBroadcastInstrs(IRV); 10088 set(Def, B, Part); 10089 return B; 10090 } 10091 10092 Value *ScalarValue = get(Def, {Part, 0}); 10093 // If we aren't vectorizing, we can just copy the scalar map values over 10094 // to the vector map. 10095 if (VF.isScalar()) { 10096 set(Def, ScalarValue, Part); 10097 return ScalarValue; 10098 } 10099 10100 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10101 bool IsUniform = RepR && RepR->isUniform(); 10102 10103 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10104 // Check if there is a scalar value for the selected lane. 10105 if (!hasScalarValue(Def, {Part, LastLane})) { 10106 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
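// (For instance, an induction that only feeds uniform address computations may
// have had just lane 0 of each part generated; the precise conditions are
// decided by the cost model's uniformity analysis.)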
10107     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10108             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10109            "unexpected recipe found to be invariant");
10110     IsUniform = true;
10111     LastLane = 0;
10112   }
10113
10114   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10115   // Set the insert point after the last scalarized instruction or after the
10116   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10117   // will directly follow the scalar definitions.
10118   auto OldIP = Builder.saveIP();
10119   auto NewIP =
10120       isa<PHINode>(LastInst)
10121           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10122           : std::next(BasicBlock::iterator(LastInst));
10123   Builder.SetInsertPoint(&*NewIP);
10124
10125   // However, if we are vectorizing, we need to construct the vector values.
10126   // If the value is known to be uniform after vectorization, we can just
10127   // broadcast the scalar value corresponding to lane zero for each unroll
10128   // iteration. Otherwise, we construct the vector values using
10129   // insertelement instructions. Since the resulting vectors are stored in
10130   // State, we will only generate the insertelements once.
10131   Value *VectorValue = nullptr;
10132   if (IsUniform) {
10133     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10134     set(Def, VectorValue, Part);
10135   } else {
10136     // Initialize packing with insertelements to start from poison.
10137     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
10138     Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10139     set(Def, Poison, Part);
10140     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10141       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10142     VectorValue = get(Def, Part);
10143   }
10144   Builder.restoreIP(OldIP);
10145   return VectorValue;
10146 }
10147
10148 // Process the loop in the VPlan-native vectorization path. This path builds
10149 // VPlan upfront in the vectorization pipeline, which allows applying
10150 // VPlan-to-VPlan transformations from the very beginning without modifying the
10151 // input LLVM IR.
10152 static bool processLoopInVPlanNativePath(
10153     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10154     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10155     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10156     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10157     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10158     LoopVectorizationRequirements &Requirements) {
10159
10160   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10161     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10162     return false;
10163   }
10164   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10165   Function *F = L->getHeader()->getParent();
10166   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10167
10168   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10169       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10170
10171   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10172                                 &Hints, IAI);
10173   // Use the planner for outer loop vectorization.
10174   // TODO: CM is not used at this point inside the planner. Turn CM into an
10175   // optional argument if we don't need it in the future.
10176   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10177                                Requirements, ORE);
10178
10179   // Get user vectorization factor.
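// (Hypothetical usage sketch: with the native path enabled via
// -mllvm -enable-vplan-native-path and an outer loop annotated with
// '#pragma clang loop vectorize(enable) vectorize_width(4)', UserVF below
// evaluates to 4.)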
10180   ElementCount UserVF = Hints.getWidth();
10181
10182   CM.collectElementTypesForWidening();
10183
10184   // Plan how to best vectorize, return the best VF and its cost.
10185   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10186
10187   // If we are stress testing VPlan builds, do not attempt to generate vector
10188   // code. Masked vector code generation support will follow soon.
10189   // Also, do not attempt to vectorize if no vector code will be produced.
10190   if (VPlanBuildStressTest || EnableVPlanPredication ||
10191       VectorizationFactor::Disabled() == VF)
10192     return false;
10193
10194   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10195
10196   {
10197     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10198                              F->getParent()->getDataLayout());
10199     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10200                            &CM, BFI, PSI, Checks);
10201     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10202                       << L->getHeader()->getParent()->getName() << "\"\n");
10203     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10204   }
10205
10206   // Mark the loop as already vectorized to avoid vectorizing again.
10207   Hints.setAlreadyVectorized();
10208   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10209   return true;
10210 }
10211
10212 // Emit a remark if there are stores to floats that required a floating point
10213 // extension. If the vectorized loop was generated with mixed floating point
10214 // precision, there will be a performance penalty from the conversion overhead
10215 // and the change in the vector width.
10216 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10217   SmallVector<Instruction *, 4> Worklist;
10218   for (BasicBlock *BB : L->getBlocks()) {
10219     for (Instruction &Inst : *BB) {
10220       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10221         if (S->getValueOperand()->getType()->isFloatTy())
10222           Worklist.push_back(S);
10223       }
10224     }
10225   }
10226
10227   // Traverse the floating point stores upwards, searching for floating point
10228   // conversions.
10229   SmallPtrSet<const Instruction *, 4> Visited;
10230   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10231   while (!Worklist.empty()) {
10232     auto *I = Worklist.pop_back_val();
10233     if (!L->contains(I))
10234       continue;
10235     if (!Visited.insert(I).second)
10236       continue;
10237
10238     // Emit a remark if the floating point store required a floating
10239     // point conversion.
10240     // TODO: More work could be done to identify the root cause such as a
10241     // constant or a function return type and point the user to it.
10242     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10243       ORE->emit([&]() {
10244         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10245                                           I->getDebugLoc(), L->getHeader())
10246                << "floating point conversion changes vector width. "
10247                << "Mixed floating point precision requires an up/down "
10248                << "cast that will negatively impact performance.";
10249       });
10250
10251     for (Use &Op : I->operands())
10252       if (auto *OpI = dyn_cast<Instruction>(Op))
10253         Worklist.push_back(OpI);
10254   }
10255 }
10256
10257 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10258     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10259                                !EnableLoopInterleaving),
10260       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10261                               !EnableLoopVectorization) {}
10262
10263 bool LoopVectorizePass::processLoop(Loop *L) {
10264   assert((EnableVPlanNativePath || L->isInnermost()) &&
10265          "VPlan-native path is not enabled.
Only process inner loops."); 10266 10267 #ifndef NDEBUG 10268 const std::string DebugLocStr = getDebugLocString(L); 10269 #endif /* NDEBUG */ 10270 10271 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10272 << L->getHeader()->getParent()->getName() << "' from " 10273 << DebugLocStr << "\n"); 10274 10275 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10276 10277 LLVM_DEBUG( 10278 dbgs() << "LV: Loop hints:" 10279 << " force=" 10280 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10281 ? "disabled" 10282 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10283 ? "enabled" 10284 : "?")) 10285 << " width=" << Hints.getWidth() 10286 << " interleave=" << Hints.getInterleave() << "\n"); 10287 10288 // Function containing loop 10289 Function *F = L->getHeader()->getParent(); 10290 10291 // Looking at the diagnostic output is the only way to determine if a loop 10292 // was vectorized (other than looking at the IR or machine code), so it 10293 // is important to generate an optimization remark for each loop. Most of 10294 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10295 // generated as OptimizationRemark and OptimizationRemarkMissed are 10296 // less verbose reporting vectorized loops and unvectorized loops that may 10297 // benefit from vectorization, respectively. 10298 10299 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10300 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10301 return false; 10302 } 10303 10304 PredicatedScalarEvolution PSE(*SE, *L); 10305 10306 // Check if it is legal to vectorize the loop. 10307 LoopVectorizationRequirements Requirements; 10308 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10309 &Requirements, &Hints, DB, AC, BFI, PSI); 10310 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10311 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10312 Hints.emitRemarkWithHints(); 10313 return false; 10314 } 10315 10316 // Check the function attributes and profiles to find out if this function 10317 // should be optimized for size. 10318 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10319 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10320 10321 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10322 // here. They may require CFG and instruction level transformations before 10323 // even evaluating whether vectorization is profitable. Since we cannot modify 10324 // the incoming IR, we need to build VPlan upfront in the vectorization 10325 // pipeline. 10326 if (!L->isInnermost()) 10327 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10328 ORE, BFI, PSI, Hints, Requirements); 10329 10330 assert(L->isInnermost() && "Inner loop expected."); 10331 10332 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10333 // count by optimizing for size, to minimize overheads. 10334 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10335 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10336 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10337 << "This loop is worth vectorizing only if no scalar " 10338 << "iteration overheads are incurred."); 10339 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10340 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10341 else { 10342 LLVM_DEBUG(dbgs() << "\n"); 10343 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10344 } 10345 } 10346 10347 // Check the function attributes to see if implicit floats are allowed. 10348 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10349 // an integer loop and the vector instructions selected are purely integer 10350 // vector instructions? 10351 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10352 reportVectorizationFailure( 10353 "Can't vectorize when the NoImplicitFloat attribute is used", 10354 "loop not vectorized due to NoImplicitFloat attribute", 10355 "NoImplicitFloat", ORE, L); 10356 Hints.emitRemarkWithHints(); 10357 return false; 10358 } 10359 10360 // Check if the target supports potentially unsafe FP vectorization. 10361 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10362 // for the target we're vectorizing for, to make sure none of the 10363 // additional fp-math flags can help. 10364 if (Hints.isPotentiallyUnsafe() && 10365 TTI->isFPVectorizationPotentiallyUnsafe()) { 10366 reportVectorizationFailure( 10367 "Potentially unsafe FP op prevents vectorization", 10368 "loop not vectorized due to unsafe FP support.", 10369 "UnsafeFP", ORE, L); 10370 Hints.emitRemarkWithHints(); 10371 return false; 10372 } 10373 10374 bool AllowOrderedReductions; 10375 // If the flag is set, use that instead and override the TTI behaviour. 10376 if (ForceOrderedReductions.getNumOccurrences() > 0) 10377 AllowOrderedReductions = ForceOrderedReductions; 10378 else 10379 AllowOrderedReductions = TTI->enableOrderedReductions(); 10380 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10381 ORE->emit([&]() { 10382 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10383 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10384 ExactFPMathInst->getDebugLoc(), 10385 ExactFPMathInst->getParent()) 10386 << "loop not vectorized: cannot prove it is safe to reorder " 10387 "floating-point operations"; 10388 }); 10389 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10390 "reorder floating-point operations\n"); 10391 Hints.emitRemarkWithHints(); 10392 return false; 10393 } 10394 10395 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10396 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10397 10398 // If an override option has been passed in for interleaved accesses, use it. 10399 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10400 UseInterleaved = EnableInterleavedMemAccesses; 10401 10402 // Analyze interleaved memory accesses. 10403 if (UseInterleaved) { 10404 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10405 } 10406 10407 // Use the cost model. 10408 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10409 F, &Hints, IAI); 10410 CM.collectValuesToIgnore(); 10411 CM.collectElementTypesForWidening(); 10412 10413 // Use the planner for vectorization. 10414 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10415 Requirements, ORE); 10416 10417 // Get user vectorization factor and interleave count. 
10418   ElementCount UserVF = Hints.getWidth();
10419   unsigned UserIC = Hints.getInterleave();
10420
10421   // Plan how to best vectorize, return the best VF and its cost.
10422   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10423
10424   VectorizationFactor VF = VectorizationFactor::Disabled();
10425   unsigned IC = 1;
10426
10427   if (MaybeVF) {
10428     VF = *MaybeVF;
10429     // Select the interleave count.
10430     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10431   }
10432
10433   // Identify the diagnostic messages that should be produced.
10434   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10435   bool VectorizeLoop = true, InterleaveLoop = true;
10436   if (VF.Width.isScalar()) {
10437     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10438     VecDiagMsg = std::make_pair(
10439         "VectorizationNotBeneficial",
10440         "the cost-model indicates that vectorization is not beneficial");
10441     VectorizeLoop = false;
10442   }
10443
10444   if (!MaybeVF && UserIC > 1) {
10445     // Tell the user interleaving was avoided up-front, despite being explicitly
10446     // requested.
10447     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10448                          "interleaving should be avoided up front\n");
10449     IntDiagMsg = std::make_pair(
10450         "InterleavingAvoided",
10451         "Ignoring UserIC, because interleaving was avoided up front");
10452     InterleaveLoop = false;
10453   } else if (IC == 1 && UserIC <= 1) {
10454     // Tell the user interleaving is not beneficial.
10455     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10456     IntDiagMsg = std::make_pair(
10457         "InterleavingNotBeneficial",
10458         "the cost-model indicates that interleaving is not beneficial");
10459     InterleaveLoop = false;
10460     if (UserIC == 1) {
10461       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10462       IntDiagMsg.second +=
10463           " and is explicitly disabled or interleave count is set to 1";
10464     }
10465   } else if (IC > 1 && UserIC == 1) {
10466     // Tell the user interleaving is beneficial, but it is explicitly disabled.
10467     LLVM_DEBUG(
10468         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10469     IntDiagMsg = std::make_pair(
10470         "InterleavingBeneficialButDisabled",
10471         "the cost-model indicates that interleaving is beneficial "
10472         "but is explicitly disabled or interleave count is set to 1");
10473     InterleaveLoop = false;
10474   }
10475
10476   // Override IC if user provided an interleave count.
10477   IC = UserIC > 0 ? UserIC : IC;
10478
10479   // Emit diagnostic messages, if any.
10480   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10481   if (!VectorizeLoop && !InterleaveLoop) {
10482     // Do not vectorize or interleave the loop.
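// (These remarks can be surfaced by users with, e.g., -Rpass=loop-vectorize,
// -Rpass-missed=loop-vectorize and -Rpass-analysis=loop-vectorize.)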
10483     ORE->emit([&]() {
10484       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10485                                       L->getStartLoc(), L->getHeader())
10486              << VecDiagMsg.second;
10487     });
10488     ORE->emit([&]() {
10489       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10490                                       L->getStartLoc(), L->getHeader())
10491              << IntDiagMsg.second;
10492     });
10493     return false;
10494   } else if (!VectorizeLoop && InterleaveLoop) {
10495     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10496     ORE->emit([&]() {
10497       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10498                                         L->getStartLoc(), L->getHeader())
10499              << VecDiagMsg.second;
10500     });
10501   } else if (VectorizeLoop && !InterleaveLoop) {
10502     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10503                       << ") in " << DebugLocStr << '\n');
10504     ORE->emit([&]() {
10505       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10506                                         L->getStartLoc(), L->getHeader())
10507              << IntDiagMsg.second;
10508     });
10509   } else if (VectorizeLoop && InterleaveLoop) {
10510     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10511                       << ") in " << DebugLocStr << '\n');
10512     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10513   }
10514
10515   bool DisableRuntimeUnroll = false;
10516   MDNode *OrigLoopID = L->getLoopID();
10517   {
10518     // Optimistically generate runtime checks. Drop them if they turn out
10519     // not to be profitable. Limit the scope of Checks, so the cleanup
10520     // happens immediately after vector code generation is done.
10521     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10522                              F->getParent()->getDataLayout());
10523     if (!VF.Width.isScalar() || IC > 1)
10524       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10525
10526     using namespace ore;
10527     if (!VectorizeLoop) {
10528       assert(IC > 1 && "interleave count should not be 1 or 0");
10529       // If we decided that it is not profitable to vectorize the loop, then
10530       // interleave it.
10531       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10532                                  &CM, BFI, PSI, Checks);
10533
10534       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10535       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10536
10537       ORE->emit([&]() {
10538         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10539                                   L->getHeader())
10540                << "interleaved loop (interleaved count: "
10541                << NV("InterleaveCount", IC) << ")";
10542       });
10543     } else {
10544       // If we decided that it is profitable to vectorize the loop, then do it.
10545
10546       // Consider vectorizing the epilogue too if it's profitable.
10547       VectorizationFactor EpilogueVF =
10548           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10549       if (EpilogueVF.Width.isVector()) {
10550
10551         // The first pass vectorizes the main loop and creates a scalar epilogue
10552         // to be vectorized by executing the plan (potentially with a different
10553         // factor) again shortly afterwards.
10554         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10555         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10556                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10557
10558         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10559         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10560                         DT);
10561         ++LoopsVectorized;
10562
10563         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10564         formLCSSARecursively(*L, *DT, LI, SE);
10565
10566         // Second pass vectorizes the epilogue and adjusts the control flow
10567         // edges from the first pass.
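// The resulting control flow is roughly the following (block names are
// illustrative, not necessarily the exact ones emitted):
//   main iter.check -> vector.body (VF x IC)
//                   -> vec.epilog.iter.check -> vec.epilog.vector.body
//                   -> scalar remainder loop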
10568         EPI.MainLoopVF = EPI.EpilogueVF;
10569         EPI.MainLoopUF = EPI.EpilogueUF;
10570         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10571                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10572                                                  Checks);
10573
10574         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10575
10576         // Ensure that the start values for any VPReductionPHIRecipes are
10577         // updated before vectorising the epilogue loop.
10578         VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
10579         for (VPRecipeBase &R : Header->phis()) {
10580           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10581             if (auto *Resume = MainILV.getReductionResumeValue(
10582                     ReductionPhi->getRecurrenceDescriptor())) {
10583               VPValue *StartVal = new VPValue(Resume);
10584               BestEpiPlan.addExternalDef(StartVal);
10585               ReductionPhi->setOperand(0, StartVal);
10586             }
10587           }
10588         }
10589
10590         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10591                         DT);
10592         ++LoopsEpilogueVectorized;
10593
10594         if (!MainILV.areSafetyChecksAdded())
10595           DisableRuntimeUnroll = true;
10596       } else {
10597         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10598                                &LVL, &CM, BFI, PSI, Checks);
10599
10600         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10601         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10602         ++LoopsVectorized;
10603
10604         // Add metadata to disable runtime unrolling of the scalar loop when
10605         // there are no runtime checks about strides and memory. A scalar loop
10606         // that is rarely used is not worth unrolling.
10607         if (!LB.areSafetyChecksAdded())
10608           DisableRuntimeUnroll = true;
10609       }
10610       // Report the vectorization decision.
10611       ORE->emit([&]() {
10612         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10613                                   L->getHeader())
10614                << "vectorized loop (vectorization width: "
10615                << NV("VectorizationFactor", VF.Width)
10616                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10617       });
10618     }
10619
10620     if (ORE->allowExtraAnalysis(LV_NAME))
10621       checkMixedPrecision(L, ORE);
10622   }
10623
10624   Optional<MDNode *> RemainderLoopID =
10625       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10626                                       LLVMLoopVectorizeFollowupEpilogue});
10627   if (RemainderLoopID.hasValue()) {
10628     L->setLoopID(RemainderLoopID.getValue());
10629   } else {
10630     if (DisableRuntimeUnroll)
10631       AddRuntimeUnrollDisableMetaData(L);
10632
10633     // Mark the loop as already vectorized to avoid vectorizing again.
10634     Hints.setAlreadyVectorized();
10635   }
10636
10637   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10638   return true;
10639 }
10640
10641 LoopVectorizeResult LoopVectorizePass::runImpl(
10642     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10643     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10644     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10645     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10646     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10647   SE = &SE_;
10648   LI = &LI_;
10649   TTI = &TTI_;
10650   DT = &DT_;
10651   BFI = &BFI_;
10652   TLI = TLI_;
10653   AA = &AA_;
10654   AC = &AC_;
10655   GetLAA = &GetLAA_;
10656   DB = &DB_;
10657   ORE = &ORE_;
10658   PSI = PSI_;
10659
10660   // Don't attempt if
10661   // 1. the target claims to have no vector registers, and
10662   // 2. interleaving won't help ILP.
10663 // 10664 // The second condition is necessary because, even if the target has no 10665 // vector registers, loop vectorization may still enable scalar 10666 // interleaving. 10667 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10668 TTI->getMaxInterleaveFactor(1) < 2) 10669 return LoopVectorizeResult(false, false); 10670 10671 bool Changed = false, CFGChanged = false; 10672 10673 // The vectorizer requires loops to be in simplified form. 10674 // Since simplification may add new inner loops, it has to run before the 10675 // legality and profitability checks. This means running the loop vectorizer 10676 // will simplify all loops, regardless of whether anything end up being 10677 // vectorized. 10678 for (auto &L : *LI) 10679 Changed |= CFGChanged |= 10680 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10681 10682 // Build up a worklist of inner-loops to vectorize. This is necessary as 10683 // the act of vectorizing or partially unrolling a loop creates new loops 10684 // and can invalidate iterators across the loops. 10685 SmallVector<Loop *, 8> Worklist; 10686 10687 for (Loop *L : *LI) 10688 collectSupportedLoops(*L, LI, ORE, Worklist); 10689 10690 LoopsAnalyzed += Worklist.size(); 10691 10692 // Now walk the identified inner loops. 10693 while (!Worklist.empty()) { 10694 Loop *L = Worklist.pop_back_val(); 10695 10696 // For the inner loops we actually process, form LCSSA to simplify the 10697 // transform. 10698 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10699 10700 Changed |= CFGChanged |= processLoop(L); 10701 } 10702 10703 // Process each loop nest in the function. 10704 return LoopVectorizeResult(Changed, CFGChanged); 10705 } 10706 10707 PreservedAnalyses LoopVectorizePass::run(Function &F, 10708 FunctionAnalysisManager &AM) { 10709 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10710 auto &LI = AM.getResult<LoopAnalysis>(F); 10711 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10712 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10713 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10714 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10715 auto &AA = AM.getResult<AAManager>(F); 10716 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10717 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10718 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10719 10720 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10721 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10722 [&](Loop &L) -> const LoopAccessInfo & { 10723 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10724 TLI, TTI, nullptr, nullptr, nullptr}; 10725 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10726 }; 10727 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10728 ProfileSummaryInfo *PSI = 10729 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10730 LoopVectorizeResult Result = 10731 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10732 if (!Result.MadeAnyChange) 10733 return PreservedAnalyses::all(); 10734 PreservedAnalyses PA; 10735 10736 // We currently do not preserve loopinfo/dominator analyses with outer loop 10737 // vectorization. Until this is addressed, mark these analyses as preserved 10738 // only for non-VPlan-native path. 10739 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
10740   if (!EnableVPlanNativePath) {
10741     PA.preserve<LoopAnalysis>();
10742     PA.preserve<DominatorTreeAnalysis>();
10743   }
10744
10745   if (Result.MadeCFGChange) {
10746     // Making CFG changes likely means a loop got vectorized. Indicate that
10747     // extra simplification passes should be run.
10748     // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10749     // be run if runtime checks have been added.
10750     AM.getResult<ShouldRunExtraVectorPasses>(F);
10751     PA.preserve<ShouldRunExtraVectorPasses>();
10752   } else {
10753     PA.preserveSet<CFGAnalyses>();
10754   }
10755   return PA;
10756 }
10757
10758 void LoopVectorizePass::printPipeline(
10759     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10760   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10761       OS, MapClassName2PassName);
10762
10763   OS << "<";
10764   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10765   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10766   OS << ">";
10767 }
10768
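// With default cl::opt values, a default-constructed pass would print as
// (a sketch, assuming the registered pass name "loop-vectorize"):
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>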