//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
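//
// As a simple illustration of the transformation described above (a sketch,
// not code taken from this file or the papers), a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten, for a vectorization factor (VF) of 4, into
//
//   for (i = 0; i + 4 <= n; i += 4)
//     a[i..i+3] = b[i..i+3] + c[i..i+3];  // one wide load/add/store per group
//   for (; i < n; ++i)                    // scalar epilogue for the remainder
//     a[i] = b[i] + c[i];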
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
    "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks"));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorisation of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars.
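/// For example (illustrative only): with VF = 4, a scalar 'add i32 %a, %b' is
/// widened into a single 'add <4 x i32>', whereas an instruction that cannot
/// be widened is replicated as four scalar instructions, one per vector lane.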
/// This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      ElementCount MinProfitableTripCount,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);

    if (MinProfitableTripCount.isZero())
      this->MinProfitableTripCount = VecWidth;
    else
      this->MinProfitableTripCount = MinProfitableTripCount;
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &CI, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State, VPlan &Plan);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Fix the non-induction PHIs in \p Plan.
  void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *VectorTripCount, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader,
                    VPlan &Plan);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
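
  // For example (an illustrative sketch): for an integer sum reduction
  // 'acc += a[i]' vectorized with VF = 4, the vector loop keeps four partial
  // sums in a vector PHI, and fixReduction emits the final horizontal add of
  // those four lanes in the middle block, feeding the result to the scalar
  // users of the reduction.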

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
                               VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widened
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widened memory load/store has at least one underlying
  ///   Instruction that is in a basic block that needs predication and after
  ///   vectorization the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  ElementCount MinProfitableTripCount;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1),
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopVF, EPI.MainLoopUF, LVL,
                            CM, BFI, PSI, Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }
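
  // For example (an illustrative sketch of the intent, with made-up numbers):
  // for a loop with 1000 iterations, a main loop with VF = 16 executes 62
  // vector iterations covering 992 elements, a vector epilogue with VF = 8
  // covers the remaining 8 elements in a single iteration, and the scalar
  // remainder loop is skipped entirely.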

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}
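
// For example (illustrative): for a fixed VF of 4 and Step = 2,
// createStepForVF simply returns the constant 8; for a scalable VF of
// <vscale x 4> it returns 8 * vscale, materialized via the llvm.vscale
// intrinsic.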

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec) ||
          isa<VPActiveLaneMaskPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);
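
      // For example (an illustrative case): an 'inbounds' GEP computing the
      // address of a load that was conditional in the scalar loop may produce
      // poison for lanes whose condition was false; once the load is widened
      // and executed unconditionally, such flags must be dropped.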

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction &UnderlyingInstr = WidenRec->getIngredient();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() &&
            Legal->blockNeedsPredication(UnderlyingInstr.getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }
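
  // As an illustrative (not exhaustive) mapping of these decisions: a load of
  // A[i] with stride +1 is typically CM_Widen, A[n - i] is CM_Widen_Reverse,
  // members of a strided group such as A[2 * i] and A[2 * i + 1] may become
  // CM_Interleave, an arbitrary indexed access A[B[i]] may be CM_GatherScatter
  // if the target supports it, and CM_Scalarize falls back to per-lane scalar
  // accesses.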
1352 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1353 assert(VF.isVector() && "Expected VF >=2"); 1354 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1355 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1356 "The cost is not calculated"); 1357 return WideningDecisions[InstOnVF].second; 1358 } 1359 1360 /// Return True if instruction \p I is an optimizable truncate whose operand 1361 /// is an induction variable. Such a truncate will be removed by adding a new 1362 /// induction variable with the destination type. 1363 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1364 // If the instruction is not a truncate, return false. 1365 auto *Trunc = dyn_cast<TruncInst>(I); 1366 if (!Trunc) 1367 return false; 1368 1369 // Get the source and destination types of the truncate. 1370 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1371 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1372 1373 // If the truncate is free for the given types, return false. Replacing a 1374 // free truncate with an induction variable would add an induction variable 1375 // update instruction to each iteration of the loop. We exclude from this 1376 // check the primary induction variable since it will need an update 1377 // instruction regardless. 1378 Value *Op = Trunc->getOperand(0); 1379 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1380 return false; 1381 1382 // If the truncated value is not an induction variable, return false. 1383 return Legal->isInductionPhi(Op); 1384 } 1385 1386 /// Collects the instructions to scalarize for each predicated instruction in 1387 /// the loop. 1388 void collectInstsToScalarize(ElementCount VF); 1389 1390 /// Collect Uniform and Scalar values for the given \p VF. 1391 /// The sets depend on CM decision for Load/Store instructions 1392 /// that may be vectorized as interleave, gather-scatter or scalarized. 1393 void collectUniformsAndScalars(ElementCount VF) { 1394 // Do the analysis once. 1395 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1396 return; 1397 setCostBasedWideningDecision(VF); 1398 collectLoopUniforms(VF); 1399 collectLoopScalars(VF); 1400 } 1401 1402 /// Returns true if the target machine supports masked store operation 1403 /// for the given \p DataType and kind of access to \p Ptr. 1404 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1405 return Legal->isConsecutivePtr(DataType, Ptr) && 1406 TTI.isLegalMaskedStore(DataType, Alignment); 1407 } 1408 1409 /// Returns true if the target machine supports masked load operation 1410 /// for the given \p DataType and kind of access to \p Ptr. 1411 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1412 return Legal->isConsecutivePtr(DataType, Ptr) && 1413 TTI.isLegalMaskedLoad(DataType, Alignment); 1414 } 1415 1416 /// Returns true if the target machine can represent \p V as a masked gather 1417 /// or scatter operation. 
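  /// Illustrative example: a load such as `%x = load i32, i32* %p` whose
  /// addresses are not consecutive may still be vectorized with VF = 4 if the
  /// target reports that a masked gather of <4 x i32> at the load's alignment
  /// is legal.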
1418 bool isLegalGatherOrScatter(Value *V, 1419 ElementCount VF = ElementCount::getFixed(1)) { 1420 bool LI = isa<LoadInst>(V); 1421 bool SI = isa<StoreInst>(V); 1422 if (!LI && !SI) 1423 return false; 1424 auto *Ty = getLoadStoreType(V); 1425 Align Align = getLoadStoreAlignment(V); 1426 if (VF.isVector()) 1427 Ty = VectorType::get(Ty, VF); 1428 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1429 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1430 } 1431 1432 /// Returns true if the target machine supports all of the reduction 1433 /// variables found for the given VF. 1434 bool canVectorizeReductions(ElementCount VF) const { 1435 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1436 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1437 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1438 })); 1439 } 1440 1441 /// Returns true if \p I is an instruction that will be scalarized with 1442 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1443 /// instructions include conditional stores and instructions that may divide 1444 /// by zero. 1445 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1446 1447 // Returns true if \p I is an instruction that will be predicated either 1448 // through scalar predication or masked load/store or masked gather/scatter. 1449 // \p VF is the vectorization factor that will be used to vectorize \p I. 1450 // Superset of instructions that return true for isScalarWithPredication. 1451 bool isPredicatedInst(Instruction *I, ElementCount VF, 1452 bool IsKnownUniform = false) { 1453 // When we know the load is uniform and the original scalar loop was not 1454 // predicated we don't need to mark it as a predicated instruction. Any 1455 // vectorised blocks created when tail-folding are something artificial we 1456 // have introduced and we know there is always at least one active lane. 1457 // That's why we call Legal->blockNeedsPredication here because it doesn't 1458 // query tail-folding. 1459 if (IsKnownUniform && isa<LoadInst>(I) && 1460 !Legal->blockNeedsPredication(I->getParent())) 1461 return false; 1462 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1463 return false; 1464 // Loads and stores that need some form of masked operation are predicated 1465 // instructions. 1466 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1467 return Legal->isMaskRequired(I); 1468 return isScalarWithPredication(I, VF); 1469 } 1470 1471 /// Returns true if \p I is a memory instruction with consecutive memory 1472 /// access that can be widened. 1473 bool 1474 memoryInstructionCanBeWidened(Instruction *I, 1475 ElementCount VF = ElementCount::getFixed(1)); 1476 1477 /// Returns true if \p I is a memory instruction in an interleaved-group 1478 /// of memory accesses that can be vectorized with wide vector loads/stores 1479 /// and shuffles. 1480 bool 1481 interleavedAccessCanBeWidened(Instruction *I, 1482 ElementCount VF = ElementCount::getFixed(1)); 1483 1484 /// Check if \p Instr belongs to any interleaved access group. 1485 bool isAccessInterleaved(Instruction *Instr) { 1486 return InterleaveInfo.isInterleaved(Instr); 1487 } 1488 1489 /// Get the interleaved access group that \p Instr belongs to. 1490 const InterleaveGroup<Instruction> * 1491 getInterleavedAccessGroup(Instruction *Instr) { 1492 return InterleaveInfo.getInterleaveGroup(Instr); 1493 } 1494 1495 /// Returns true if we're required to use a scalar epilogue for at least 1496 /// the final iteration of the original loop. 
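  /// For example, an interleave group that leaves a gap after its last member
  /// accesses may not safely execute the final iterations as a full vector
  /// access, so those iterations have to run in the scalar epilogue (this is
  /// what InterleaveInfo.requiresScalarEpilogue() reports).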
1497 bool requiresScalarEpilogue(ElementCount VF) const { 1498 if (!isScalarEpilogueAllowed()) 1499 return false; 1500 // If we might exit from anywhere but the latch, must run the exiting 1501 // iteration in scalar form. 1502 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1503 return true; 1504 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1505 } 1506 1507 /// Returns true if a scalar epilogue is allowed, i.e. it has not been 1508 /// disabled due to optsize or a loop hint annotation. 1509 bool isScalarEpilogueAllowed() const { 1510 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1511 } 1512 1513 /// Returns true if all loop blocks should be masked to fold the tail loop. 1514 bool foldTailByMasking() const { return FoldTailByMasking; } 1515 1516 /// Returns true if we're tail-folding and want to use the active lane mask 1517 /// for vector loop control flow. 1518 bool useActiveLaneMaskForControlFlow() const { 1519 return FoldTailByMasking && 1520 TTI.emitGetActiveLaneMask() == PredicationStyle::DataAndControlFlow; 1521 } 1522 1523 /// Returns true if the instructions in this block require predication 1524 /// for any reason, e.g. because tail folding now requires a predicate 1525 /// or because the block in the original loop was predicated. 1526 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const { 1527 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1528 } 1529 1530 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1531 /// nodes to the chain of instructions representing the reductions. Uses a 1532 /// MapVector to ensure deterministic iteration order. 1533 using ReductionChainMap = 1534 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1535 1536 /// Return the chain of instructions representing an inloop reduction. 1537 const ReductionChainMap &getInLoopReductionChains() const { 1538 return InLoopReductionChains; 1539 } 1540 1541 /// Returns true if the Phi is part of an inloop reduction. 1542 bool isInLoopReduction(PHINode *Phi) const { 1543 return InLoopReductionChains.count(Phi); 1544 } 1545 1546 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1547 /// with factor VF. Return the cost of the instruction, including 1548 /// scalarization overhead if it's needed. 1549 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1550 1551 /// Estimate cost of a call instruction CI if it were vectorized with factor 1552 /// VF. Return the cost of the instruction, including scalarization overhead 1553 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1554 /// scalarized, 1555 /// i.e. either a vector version isn't available or it is too expensive. 1556 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1557 bool &NeedToScalarize) const; 1558 1559 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1560 /// that of B. 1561 bool isMoreProfitable(const VectorizationFactor &A, 1562 const VectorizationFactor &B) const; 1563 1564 /// Invalidates decisions already taken by the cost model. 1565 void invalidateCostModelingDecisions() { 1566 WideningDecisions.clear(); 1567 Uniforms.clear(); 1568 Scalars.clear(); 1569 } 1570 1571 /// Convenience function that returns the value of vscale_range iff 1572 /// vscale_range.min == vscale_range.max or otherwise returns the value 1573 /// returned by the corresponding TTI method.
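  /// Illustrative example: with the function attribute vscale_range(2,2) this
  /// returns 2, whereas with vscale_range(1,16) the bounds differ and the
  /// value reported by the target hook is used instead.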
1574 Optional<unsigned> getVScaleForTuning() const; 1575 1576 private: 1577 unsigned NumPredStores = 0; 1578 1579 /// \return An upper bound for the vectorization factors for both 1580 /// fixed and scalable vectorization, where the minimum-known number of 1581 /// elements is a power-of-2 larger than zero. If scalable vectorization is 1582 /// disabled or unsupported, then the scalable part will be equal to 1583 /// ElementCount::getScalable(0). 1584 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount, 1585 ElementCount UserVF, 1586 bool FoldTailByMasking); 1587 1588 /// \return the maximized element count based on the targets vector 1589 /// registers and the loop trip-count, but limited to a maximum safe VF. 1590 /// This is a helper function of computeFeasibleMaxVF. 1591 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, 1592 unsigned SmallestType, 1593 unsigned WidestType, 1594 ElementCount MaxSafeVF, 1595 bool FoldTailByMasking); 1596 1597 /// \return the maximum legal scalable VF, based on the safe max number 1598 /// of elements. 1599 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements); 1600 1601 /// The vectorization cost is a combination of the cost itself and a boolean 1602 /// indicating whether any of the contributing operations will actually 1603 /// operate on vector values after type legalization in the backend. If this 1604 /// latter value is false, then all operations will be scalarized (i.e. no 1605 /// vectorization has actually taken place). 1606 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1607 1608 /// Returns the expected execution cost. The unit of the cost does 1609 /// not matter because we use the 'cost' units to compare different 1610 /// vector widths. The cost that is returned is *not* normalized by 1611 /// the factor width. If \p Invalid is not nullptr, this function 1612 /// will add a pair(Instruction*, ElementCount) to \p Invalid for 1613 /// each instruction that has an Invalid cost for the given VF. 1614 using InstructionVFPair = std::pair<Instruction *, ElementCount>; 1615 VectorizationCostTy 1616 expectedCost(ElementCount VF, 1617 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr); 1618 1619 /// Returns the execution time cost of an instruction for a given vector 1620 /// width. Vector width of one means scalar. 1621 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1622 1623 /// The cost-computation logic from getInstructionCost which provides 1624 /// the vector type as an output parameter. 1625 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1626 Type *&VectorTy); 1627 1628 /// Return the cost of instructions in an inloop reduction pattern, if I is 1629 /// part of that pattern. 1630 Optional<InstructionCost> 1631 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, 1632 TTI::TargetCostKind CostKind); 1633 1634 /// Calculate vectorization cost of memory instruction \p I. 1635 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1636 1637 /// The cost computation for scalarized memory instruction. 1638 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1639 1640 /// The cost computation for interleaving group of memory instructions. 1641 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1642 1643 /// The cost computation for Gather/Scatter instruction. 
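  /// Illustrative note: this is roughly the address-computation cost for the
  /// widened pointer vector plus the target's cost for the gather/scatter
  /// operation itself on the widened value type.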
1644 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1645 1646 /// The cost computation for widening instruction \p I with consecutive 1647 /// memory access. 1648 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1649 1650 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1651 /// Load: scalar load + broadcast. 1652 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1653 /// element) 1654 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1655 1656 /// Estimate the overhead of scalarizing an instruction. This is a 1657 /// convenience wrapper for the type-based getScalarizationOverhead API. 1658 InstructionCost getScalarizationOverhead(Instruction *I, 1659 ElementCount VF) const; 1660 1661 /// Returns whether the instruction is a load or store and will be emitted 1662 /// as a vector operation. 1663 bool isConsecutiveLoadOrStore(Instruction *I); 1664 1665 /// Returns true if an artificially high cost for emulated masked memrefs 1666 /// should be used. 1667 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF); 1668 1669 /// Map of scalar integer values to the smallest bitwidth they can be legally 1670 /// represented as. The vector equivalents of these values should be truncated 1671 /// to this type. 1672 MapVector<Instruction *, uint64_t> MinBWs; 1673 1674 /// A type representing the costs for instructions if they were to be 1675 /// scalarized rather than vectorized. The entries are Instruction-Cost 1676 /// pairs. 1677 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; 1678 1679 /// Holds, per vectorization factor, the BasicBlocks that are known to remain 1680 /// predicated blocks after vectorization. 1681 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>> 1682 PredicatedBBsAfterVectorization; 1683 1684 /// Records whether it is allowed to have the original scalar loop execute at 1685 /// least once. This may be needed as a fallback loop in case runtime 1686 /// aliasing/dependence checks fail, or to handle the tail/remainder 1687 /// iterations when the trip count is unknown or doesn't divide by the VF, 1688 /// or as a peel-loop to handle gaps in interleave-groups. 1689 /// Under optsize and when the trip count is very small we don't allow any 1690 /// iterations to execute in the scalar loop. 1691 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1692 1693 /// True if all blocks of the loop are to be masked to fold the tail of scalar iterations. 1694 bool FoldTailByMasking = false; 1695 1696 /// A map holding scalar costs for different vectorization factors. The 1697 /// presence of a cost for an instruction in the mapping indicates that the 1698 /// instruction will be scalarized when vectorizing with the associated 1699 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1700 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1701 1702 /// Holds the instructions known to be uniform after vectorization. 1703 /// The data is collected per VF. 1704 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1705 1706 /// Holds the instructions known to be scalar after vectorization. 1707 /// The data is collected per VF. 1708 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1709 1710 /// Holds the instructions (address computations) that are forced to be 1711 /// scalarized.
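  /// E.g. the address computation feeding a memory access that the cost model
  /// decided to scalarize (CM_Scalarize) is itself kept scalar.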
1712 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1713 1714 /// PHINodes of the reductions that should be expanded in-loop along with 1715 /// their associated chains of reduction operations, in program order from top 1716 /// (PHI) to bottom 1717 ReductionChainMap InLoopReductionChains; 1718 1719 /// A Map of inloop reduction operations and their immediate chain operand. 1720 /// FIXME: This can be removed once reductions can be costed correctly in 1721 /// vplan. This was added to allow quick lookup to the inloop operations, 1722 /// without having to loop through InLoopReductionChains. 1723 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1724 1725 /// Returns the expected difference in cost from scalarizing the expression 1726 /// feeding a predicated instruction \p PredInst. The instructions to 1727 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1728 /// non-negative return value implies the expression will be scalarized. 1729 /// Currently, only single-use chains are considered for scalarization. 1730 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1731 ElementCount VF); 1732 1733 /// Collect the instructions that are uniform after vectorization. An 1734 /// instruction is uniform if we represent it with a single scalar value in 1735 /// the vectorized loop corresponding to each vector iteration. Examples of 1736 /// uniform instructions include pointer operands of consecutive or 1737 /// interleaved memory accesses. Note that although uniformity implies an 1738 /// instruction will be scalar, the reverse is not true. In general, a 1739 /// scalarized instruction will be represented by VF scalar values in the 1740 /// vectorized loop, each corresponding to an iteration of the original 1741 /// scalar loop. 1742 void collectLoopUniforms(ElementCount VF); 1743 1744 /// Collect the instructions that are scalar after vectorization. An 1745 /// instruction is scalar if it is known to be uniform or will be scalarized 1746 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1747 /// to the list if they are used by a load/store instruction that is marked as 1748 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1749 /// VF values in the vectorized loop, each corresponding to an iteration of 1750 /// the original scalar loop. 1751 void collectLoopScalars(ElementCount VF); 1752 1753 /// Keeps cost model vectorization decision and cost for instructions. 1754 /// Right now it is used for memory instructions only. 1755 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1756 std::pair<InstWidening, InstructionCost>>; 1757 1758 DecisionList WideningDecisions; 1759 1760 /// Returns true if \p V is expected to be vectorized and it needs to be 1761 /// extracted. 1762 bool needsExtract(Value *V, ElementCount VF) const { 1763 Instruction *I = dyn_cast<Instruction>(V); 1764 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1765 TheLoop->isLoopInvariant(I)) 1766 return false; 1767 1768 // Assume we can vectorize V (and hence we need extraction) if the 1769 // scalars are not computed yet. This can happen, because it is called 1770 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1771 // the scalars are collected. That should be a safe assumption in most 1772 // cases, because we check if the operands have vectorizable types 1773 // beforehand in LoopVectorizationLegality. 
1774 return Scalars.find(VF) == Scalars.end() || 1775 !isScalarAfterVectorization(I, VF); 1776 }; 1777 1778 /// Returns a range containing only operands needing to be extracted. 1779 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1780 ElementCount VF) const { 1781 return SmallVector<Value *, 4>(make_filter_range( 1782 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1783 } 1784 1785 /// Determines if we have the infrastructure to vectorize loop \p L and its 1786 /// epilogue, assuming the main loop is vectorized by \p VF. 1787 bool isCandidateForEpilogueVectorization(const Loop &L, 1788 const ElementCount VF) const; 1789 1790 /// Returns true if epilogue vectorization is considered profitable, and 1791 /// false otherwise. 1792 /// \p VF is the vectorization factor chosen for the original loop. 1793 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1794 1795 public: 1796 /// The loop that we evaluate. 1797 Loop *TheLoop; 1798 1799 /// Predicated scalar evolution analysis. 1800 PredicatedScalarEvolution &PSE; 1801 1802 /// Loop Info analysis. 1803 LoopInfo *LI; 1804 1805 /// Vectorization legality. 1806 LoopVectorizationLegality *Legal; 1807 1808 /// Vector target information. 1809 const TargetTransformInfo &TTI; 1810 1811 /// Target Library Info. 1812 const TargetLibraryInfo *TLI; 1813 1814 /// Demanded bits analysis. 1815 DemandedBits *DB; 1816 1817 /// Assumption cache. 1818 AssumptionCache *AC; 1819 1820 /// Interface to emit optimization remarks. 1821 OptimizationRemarkEmitter *ORE; 1822 1823 const Function *TheFunction; 1824 1825 /// Loop Vectorize Hint. 1826 const LoopVectorizeHints *Hints; 1827 1828 /// The interleave access information contains groups of interleaved accesses 1829 /// with the same stride and close to each other. 1830 InterleavedAccessInfo &InterleaveInfo; 1831 1832 /// Values to ignore in the cost model. 1833 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1834 1835 /// Values to ignore in the cost model when VF > 1. 1836 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1837 1838 /// All element types found in the loop. 1839 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1840 1841 /// Profitable vector factors. 1842 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1843 }; 1844 } // end namespace llvm 1845 1846 /// Helper struct to manage generating runtime checks for vectorization. 1847 /// 1848 /// The runtime checks are created up-front in temporary blocks to allow better 1849 /// estimating the cost and un-linked from the existing IR. After deciding to 1850 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1851 /// temporary blocks are completely removed. 1852 class GeneratedRTChecks { 1853 /// Basic block which contains the generated SCEV checks, if any. 1854 BasicBlock *SCEVCheckBlock = nullptr; 1855 1856 /// The value representing the result of the generated SCEV checks. If it is 1857 /// nullptr, either no SCEV checks have been generated or they have been used. 1858 Value *SCEVCheckCond = nullptr; 1859 1860 /// Basic block which contains the generated memory runtime checks, if any. 1861 BasicBlock *MemCheckBlock = nullptr; 1862 1863 /// The value representing the result of the generated memory runtime checks. 1864 /// If it is nullptr, either no memory runtime checks have been generated or 1865 /// they have been used. 
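  /// Illustrative example: for two pointers %a and %b accessed in the loop, the
  /// emitted condition conceptually checks whether the accessed address ranges
  /// overlap; if it evaluates to true at runtime, execution bypasses the vector
  /// loop and falls back to the scalar loop.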
1866 Value *MemRuntimeCheckCond = nullptr; 1867 1868 DominatorTree *DT; 1869 LoopInfo *LI; 1870 TargetTransformInfo *TTI; 1871 1872 SCEVExpander SCEVExp; 1873 SCEVExpander MemCheckExp; 1874 1875 bool CostTooHigh = false; 1876 1877 public: 1878 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1879 TargetTransformInfo *TTI, const DataLayout &DL) 1880 : DT(DT), LI(LI), TTI(TTI), SCEVExp(SE, DL, "scev.check"), 1881 MemCheckExp(SE, DL, "scev.check") {} 1882 1883 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1884 /// accurately estimate the cost of the runtime checks. The blocks are 1885 /// un-linked from the IR and is added back during vector code generation. If 1886 /// there is no vector code generation, the check blocks are removed 1887 /// completely. 1888 void Create(Loop *L, const LoopAccessInfo &LAI, 1889 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) { 1890 1891 // Hard cutoff to limit compile-time increase in case a very large number of 1892 // runtime checks needs to be generated. 1893 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to 1894 // profile info. 1895 CostTooHigh = 1896 LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold; 1897 if (CostTooHigh) 1898 return; 1899 1900 BasicBlock *LoopHeader = L->getHeader(); 1901 BasicBlock *Preheader = L->getLoopPreheader(); 1902 1903 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1904 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1905 // may be used by SCEVExpander. The blocks will be un-linked from their 1906 // predecessors and removed from LI & DT at the end of the function. 1907 if (!UnionPred.isAlwaysTrue()) { 1908 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1909 nullptr, "vector.scevcheck"); 1910 1911 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1912 &UnionPred, SCEVCheckBlock->getTerminator()); 1913 } 1914 1915 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1916 if (RtPtrChecking.Need) { 1917 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1918 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1919 "vector.memcheck"); 1920 1921 auto DiffChecks = RtPtrChecking.getDiffChecks(); 1922 if (DiffChecks) { 1923 Value *RuntimeVF = nullptr; 1924 MemRuntimeCheckCond = addDiffRuntimeChecks( 1925 MemCheckBlock->getTerminator(), L, *DiffChecks, MemCheckExp, 1926 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) { 1927 if (!RuntimeVF) 1928 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF); 1929 return RuntimeVF; 1930 }, 1931 IC); 1932 } else { 1933 MemRuntimeCheckCond = 1934 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 1935 RtPtrChecking.getChecks(), MemCheckExp); 1936 } 1937 assert(MemRuntimeCheckCond && 1938 "no RT checks generated although RtPtrChecking " 1939 "claimed checks are required"); 1940 } 1941 1942 if (!MemCheckBlock && !SCEVCheckBlock) 1943 return; 1944 1945 // Unhook the temporary block with the checks, update various places 1946 // accordingly. 
1947 if (SCEVCheckBlock) 1948 SCEVCheckBlock->replaceAllUsesWith(Preheader); 1949 if (MemCheckBlock) 1950 MemCheckBlock->replaceAllUsesWith(Preheader); 1951 1952 if (SCEVCheckBlock) { 1953 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1954 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 1955 Preheader->getTerminator()->eraseFromParent(); 1956 } 1957 if (MemCheckBlock) { 1958 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1959 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 1960 Preheader->getTerminator()->eraseFromParent(); 1961 } 1962 1963 DT->changeImmediateDominator(LoopHeader, Preheader); 1964 if (MemCheckBlock) { 1965 DT->eraseNode(MemCheckBlock); 1966 LI->removeBlock(MemCheckBlock); 1967 } 1968 if (SCEVCheckBlock) { 1969 DT->eraseNode(SCEVCheckBlock); 1970 LI->removeBlock(SCEVCheckBlock); 1971 } 1972 } 1973 1974 InstructionCost getCost() { 1975 if (SCEVCheckBlock || MemCheckBlock) 1976 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n"); 1977 1978 if (CostTooHigh) { 1979 InstructionCost Cost; 1980 Cost.setInvalid(); 1981 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n"); 1982 return Cost; 1983 } 1984 1985 InstructionCost RTCheckCost = 0; 1986 if (SCEVCheckBlock) 1987 for (Instruction &I : *SCEVCheckBlock) { 1988 if (SCEVCheckBlock->getTerminator() == &I) 1989 continue; 1990 InstructionCost C = 1991 TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput); 1992 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n"); 1993 RTCheckCost += C; 1994 } 1995 if (MemCheckBlock) 1996 for (Instruction &I : *MemCheckBlock) { 1997 if (MemCheckBlock->getTerminator() == &I) 1998 continue; 1999 InstructionCost C = 2000 TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput); 2001 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n"); 2002 RTCheckCost += C; 2003 } 2004 2005 if (SCEVCheckBlock || MemCheckBlock) 2006 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost 2007 << "\n"); 2008 2009 return RTCheckCost; 2010 } 2011 2012 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2013 /// unused. 2014 ~GeneratedRTChecks() { 2015 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2016 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2017 if (!SCEVCheckCond) 2018 SCEVCleaner.markResultUsed(); 2019 2020 if (!MemRuntimeCheckCond) 2021 MemCheckCleaner.markResultUsed(); 2022 2023 if (MemRuntimeCheckCond) { 2024 auto &SE = *MemCheckExp.getSE(); 2025 // Memory runtime check generation creates compares that use expanded 2026 // values. Remove them before running the SCEVExpanderCleaners. 2027 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2028 if (MemCheckExp.isInsertedInstruction(&I)) 2029 continue; 2030 SE.forgetValue(&I); 2031 I.eraseFromParent(); 2032 } 2033 } 2034 MemCheckCleaner.cleanup(); 2035 SCEVCleaner.cleanup(); 2036 2037 if (SCEVCheckCond) 2038 SCEVCheckBlock->eraseFromParent(); 2039 if (MemRuntimeCheckCond) 2040 MemCheckBlock->eraseFromParent(); 2041 } 2042 2043 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2044 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2045 /// depending on the generated condition. 2046 BasicBlock *emitSCEVChecks(BasicBlock *Bypass, 2047 BasicBlock *LoopVectorPreHeader, 2048 BasicBlock *LoopExitBlock) { 2049 if (!SCEVCheckCond) 2050 return nullptr; 2051 2052 Value *Cond = SCEVCheckCond; 2053 // Mark the check as used, to prevent it from being removed during cleanup. 
2054 SCEVCheckCond = nullptr; 2055 if (auto *C = dyn_cast<ConstantInt>(Cond)) 2056 if (C->isZero()) 2057 return nullptr; 2058 2059 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2060 2061 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2062 // Create new preheader for vector loop. 2063 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2064 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2065 2066 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2067 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2068 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2069 SCEVCheckBlock); 2070 2071 DT->addNewBlock(SCEVCheckBlock, Pred); 2072 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2073 2074 ReplaceInstWithInst(SCEVCheckBlock->getTerminator(), 2075 BranchInst::Create(Bypass, LoopVectorPreHeader, Cond)); 2076 return SCEVCheckBlock; 2077 } 2078 2079 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2080 /// the branches to branch to the vector preheader or \p Bypass, depending on 2081 /// the generated condition. 2082 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass, 2083 BasicBlock *LoopVectorPreHeader) { 2084 // Check if we generated code that checks in runtime if arrays overlap. 2085 if (!MemRuntimeCheckCond) 2086 return nullptr; 2087 2088 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2089 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2090 MemCheckBlock); 2091 2092 DT->addNewBlock(MemCheckBlock, Pred); 2093 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2094 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2095 2096 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2097 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2098 2099 ReplaceInstWithInst( 2100 MemCheckBlock->getTerminator(), 2101 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2102 MemCheckBlock->getTerminator()->setDebugLoc( 2103 Pred->getTerminator()->getDebugLoc()); 2104 2105 // Mark the check as used, to prevent it from being removed during cleanup. 2106 MemRuntimeCheckCond = nullptr; 2107 return MemCheckBlock; 2108 } 2109 }; 2110 2111 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2112 // vectorization. The loop needs to be annotated with #pragma omp simd 2113 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2114 // vector length information is not provided, vectorization is not considered 2115 // explicit. Interleave hints are not allowed either. These limitations will be 2116 // relaxed in the future. 2117 // Please, note that we are currently forced to abuse the pragma 'clang 2118 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2119 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2120 // provides *explicit vectorization hints* (LV can bypass legal checks and 2121 // assume that vectorization is legal). However, both hints are implemented 2122 // using the same metadata (llvm.loop.vectorize, processed by 2123 // LoopVectorizeHints). This will be fixed in the future when the native IR 2124 // representation for pragma 'omp simd' is introduced. 2125 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2126 OptimizationRemarkEmitter *ORE) { 2127 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2128 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2129 2130 // Only outer loops with an explicit vectorization hint are supported. 
2131 // Unannotated outer loops are ignored. 2132 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2133 return false; 2134 2135 Function *Fn = OuterLp->getHeader()->getParent(); 2136 if (!Hints.allowVectorization(Fn, OuterLp, 2137 true /*VectorizeOnlyWhenForced*/)) { 2138 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2139 return false; 2140 } 2141 2142 if (Hints.getInterleave() > 1) { 2143 // TODO: Interleave support is future work. 2144 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2145 "outer loops.\n"); 2146 Hints.emitRemarkWithHints(); 2147 return false; 2148 } 2149 2150 return true; 2151 } 2152 2153 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2154 OptimizationRemarkEmitter *ORE, 2155 SmallVectorImpl<Loop *> &V) { 2156 // Collect inner loops and outer loops without irreducible control flow. For 2157 // now, only collect outer loops that have explicit vectorization hints. If we 2158 // are stress testing the VPlan H-CFG construction, we collect the outermost 2159 // loop of every loop nest. 2160 if (L.isInnermost() || VPlanBuildStressTest || 2161 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2162 LoopBlocksRPO RPOT(&L); 2163 RPOT.perform(LI); 2164 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2165 V.push_back(&L); 2166 // TODO: Collect inner loops inside marked outer loops in case 2167 // vectorization fails for the outer loop. Do not invoke 2168 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2169 // already known to be reducible. We can use an inherited attribute for 2170 // that. 2171 return; 2172 } 2173 } 2174 for (Loop *InnerL : L) 2175 collectSupportedLoops(*InnerL, LI, ORE, V); 2176 } 2177 2178 namespace { 2179 2180 /// The LoopVectorize Pass. 2181 struct LoopVectorize : public FunctionPass { 2182 /// Pass identification, replacement for typeid 2183 static char ID; 2184 2185 LoopVectorizePass Impl; 2186 2187 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2188 bool VectorizeOnlyWhenForced = false) 2189 : FunctionPass(ID), 2190 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2191 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2192 } 2193 2194 bool runOnFunction(Function &F) override { 2195 if (skipFunction(F)) 2196 return false; 2197 2198 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2199 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2200 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2201 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2202 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2203 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2204 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 2205 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2206 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2207 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2208 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2209 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2210 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2211 2212 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2213 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2214 2215 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2216 GetLAA, *ORE, PSI).MadeAnyChange; 2217 } 2218 2219 void getAnalysisUsage(AnalysisUsage &AU) const override { 2220 AU.addRequired<AssumptionCacheTracker>(); 2221 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2222 AU.addRequired<DominatorTreeWrapperPass>(); 2223 AU.addRequired<LoopInfoWrapperPass>(); 2224 AU.addRequired<ScalarEvolutionWrapperPass>(); 2225 AU.addRequired<TargetTransformInfoWrapperPass>(); 2226 AU.addRequired<AAResultsWrapperPass>(); 2227 AU.addRequired<LoopAccessLegacyAnalysis>(); 2228 AU.addRequired<DemandedBitsWrapperPass>(); 2229 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2230 AU.addRequired<InjectTLIMappingsLegacy>(); 2231 2232 // We currently do not preserve loopinfo/dominator analyses with outer loop 2233 // vectorization. Until this is addressed, mark these analyses as preserved 2234 // only for non-VPlan-native path. 2235 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2236 if (!EnableVPlanNativePath) { 2237 AU.addPreserved<LoopInfoWrapperPass>(); 2238 AU.addPreserved<DominatorTreeWrapperPass>(); 2239 } 2240 2241 AU.addPreserved<BasicAAWrapperPass>(); 2242 AU.addPreserved<GlobalsAAWrapperPass>(); 2243 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2244 } 2245 }; 2246 2247 } // end anonymous namespace 2248 2249 //===----------------------------------------------------------------------===// 2250 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2251 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2252 //===----------------------------------------------------------------------===// 2253 2254 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2255 // We need to place the broadcast of invariant variables outside the loop, 2256 // but only if it's proven safe to do so. Else, broadcast will be inside 2257 // vector loop body. 2258 Instruction *Instr = dyn_cast<Instruction>(V); 2259 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2260 (!Instr || 2261 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2262 // Place the code for broadcasting invariant variables in the new preheader. 2263 IRBuilder<>::InsertPointGuard Guard(Builder); 2264 if (SafeToHoist) 2265 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2266 2267 // Broadcast the scalar into all locations in the vector. 2268 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2269 2270 return Shuf; 2271 } 2272 2273 /// This function adds 2274 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2275 /// to each vector element of Val. The sequence starts at StartIndex. 2276 /// \p Opcode is relevant for FP induction variable. 
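/// Illustrative example: with StartIdx = 0, Step = 2 and VF = 4, an integer
/// Val of <n, n, n, n> becomes <n + 0, n + 2, n + 4, n + 6>.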
2277 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2278 Instruction::BinaryOps BinOp, ElementCount VF, 2279 IRBuilderBase &Builder) { 2280 assert(VF.isVector() && "only vector VFs are supported"); 2281 2282 // Create and check the types. 2283 auto *ValVTy = cast<VectorType>(Val->getType()); 2284 ElementCount VLen = ValVTy->getElementCount(); 2285 2286 Type *STy = Val->getType()->getScalarType(); 2287 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2288 "Induction Step must be an integer or FP"); 2289 assert(Step->getType() == STy && "Step has wrong type"); 2290 2291 SmallVector<Constant *, 8> Indices; 2292 2293 // Create a vector of consecutive numbers from zero to VF. 2294 VectorType *InitVecValVTy = ValVTy; 2295 if (STy->isFloatingPointTy()) { 2296 Type *InitVecValSTy = 2297 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2298 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2299 } 2300 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2301 2302 // Splat the StartIdx 2303 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2304 2305 if (STy->isIntegerTy()) { 2306 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2307 Step = Builder.CreateVectorSplat(VLen, Step); 2308 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2309 // FIXME: The newly created binary instructions should contain nsw/nuw 2310 // flags, which can be found from the original scalar operations. 2311 Step = Builder.CreateMul(InitVec, Step); 2312 return Builder.CreateAdd(Val, Step, "induction"); 2313 } 2314 2315 // Floating point induction. 2316 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2317 "Binary Opcode should be specified for FP induction"); 2318 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2319 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2320 2321 Step = Builder.CreateVectorSplat(VLen, Step); 2322 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2323 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2324 } 2325 2326 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2327 /// variable on which to base the steps, \p Step is the size of the step. 2328 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2329 const InductionDescriptor &ID, VPValue *Def, 2330 VPTransformState &State) { 2331 IRBuilderBase &Builder = State.Builder; 2332 // We shouldn't have to build scalar steps if we aren't vectorizing. 2333 assert(State.VF.isVector() && "VF should be greater than one"); 2334 // Get the value type and ensure it and the step have the same integer type. 2335 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2336 assert(ScalarIVTy == Step->getType() && 2337 "Val and Step should have the same type"); 2338 2339 // We build scalar steps for both integer and floating-point induction 2340 // variables. Here, we determine the kind of arithmetic we will perform. 2341 Instruction::BinaryOps AddOp; 2342 Instruction::BinaryOps MulOp; 2343 if (ScalarIVTy->isIntegerTy()) { 2344 AddOp = Instruction::Add; 2345 MulOp = Instruction::Mul; 2346 } else { 2347 AddOp = ID.getInductionOpcode(); 2348 MulOp = Instruction::FMul; 2349 } 2350 2351 // Determine the number of scalars we need to generate for each unroll 2352 // iteration. 2353 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2354 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2355 // Compute the scalar steps and save the results in State. 
2356 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2357 ScalarIVTy->getScalarSizeInBits()); 2358 Type *VecIVTy = nullptr; 2359 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2360 if (!FirstLaneOnly && State.VF.isScalable()) { 2361 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2362 UnitStepVec = 2363 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2364 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2365 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2366 } 2367 2368 for (unsigned Part = 0; Part < State.UF; ++Part) { 2369 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2370 2371 if (!FirstLaneOnly && State.VF.isScalable()) { 2372 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2373 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2374 if (ScalarIVTy->isFloatingPointTy()) 2375 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2376 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2377 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2378 State.set(Def, Add, Part); 2379 // It's useful to record the lane values too for the known minimum number 2380 // of elements so we do those below. This improves the code quality when 2381 // trying to extract the first element, for example. 2382 } 2383 2384 if (ScalarIVTy->isFloatingPointTy()) 2385 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2386 2387 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2388 Value *StartIdx = Builder.CreateBinOp( 2389 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2390 // The step returned by `createStepForVF` is a runtime-evaluated value 2391 // when VF is scalable. Otherwise, it should be folded into a Constant. 2392 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2393 "Expected StartIdx to be folded to a constant when VF is not " 2394 "scalable"); 2395 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2396 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2397 State.set(Def, Add, VPIteration(Part, Lane)); 2398 } 2399 } 2400 } 2401 2402 // Generate code for the induction step. Note that induction steps are 2403 // required to be loop-invariant 2404 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE, 2405 Instruction *InsertBefore, 2406 Loop *OrigLoop = nullptr) { 2407 const DataLayout &DL = SE.getDataLayout(); 2408 assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) && 2409 "Induction step should be loop invariant"); 2410 if (auto *E = dyn_cast<SCEVUnknown>(Step)) 2411 return E->getValue(); 2412 2413 SCEVExpander Exp(SE, DL, "induction"); 2414 return Exp.expandCodeFor(Step, Step->getType(), InsertBefore); 2415 } 2416 2417 /// Compute the transformed value of Index at offset StartValue using step 2418 /// StepValue. 2419 /// For integer induction, returns StartValue + Index * StepValue. 2420 /// For pointer induction, returns StartValue[Index * StepValue]. 2421 /// FIXME: The newly created binary instructions should contain nsw/nuw 2422 /// flags, which can be found from the original scalar operations. 2423 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index, 2424 Value *StartValue, Value *Step, 2425 const InductionDescriptor &ID) { 2426 assert(Index->getType()->getScalarType() == Step->getType() && 2427 "Index scalar type does not match StepValue type"); 2428 2429 // Note: the IR at this point is broken. 
We cannot use SE to create any new 2430 // SCEV and then expand it, hoping that SCEV's simplification will give us 2431 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2432 // lead to various SCEV crashes. So all we can do is to use builder and rely 2433 // on InstCombine for future simplifications. Here we handle some trivial 2434 // cases only. 2435 auto CreateAdd = [&B](Value *X, Value *Y) { 2436 assert(X->getType() == Y->getType() && "Types don't match!"); 2437 if (auto *CX = dyn_cast<ConstantInt>(X)) 2438 if (CX->isZero()) 2439 return Y; 2440 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2441 if (CY->isZero()) 2442 return X; 2443 return B.CreateAdd(X, Y); 2444 }; 2445 2446 // We allow X to be a vector type, in which case Y will potentially be 2447 // splatted into a vector with the same element count. 2448 auto CreateMul = [&B](Value *X, Value *Y) { 2449 assert(X->getType()->getScalarType() == Y->getType() && 2450 "Types don't match!"); 2451 if (auto *CX = dyn_cast<ConstantInt>(X)) 2452 if (CX->isOne()) 2453 return Y; 2454 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2455 if (CY->isOne()) 2456 return X; 2457 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2458 if (XVTy && !isa<VectorType>(Y->getType())) 2459 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2460 return B.CreateMul(X, Y); 2461 }; 2462 2463 switch (ID.getKind()) { 2464 case InductionDescriptor::IK_IntInduction: { 2465 assert(!isa<VectorType>(Index->getType()) && 2466 "Vector indices not supported for integer inductions yet"); 2467 assert(Index->getType() == StartValue->getType() && 2468 "Index type does not match StartValue type"); 2469 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2470 return B.CreateSub(StartValue, Index); 2471 auto *Offset = CreateMul(Index, Step); 2472 return CreateAdd(StartValue, Offset); 2473 } 2474 case InductionDescriptor::IK_PtrInduction: { 2475 assert(isa<Constant>(Step) && 2476 "Expected constant step for pointer induction"); 2477 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2478 } 2479 case InductionDescriptor::IK_FpInduction: { 2480 assert(!isa<VectorType>(Index->getType()) && 2481 "Vector indices not supported for FP inductions yet"); 2482 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2483 auto InductionBinOp = ID.getInductionBinOp(); 2484 assert(InductionBinOp && 2485 (InductionBinOp->getOpcode() == Instruction::FAdd || 2486 InductionBinOp->getOpcode() == Instruction::FSub) && 2487 "Original bin op should be defined for FP induction"); 2488 2489 Value *MulExp = B.CreateFMul(Step, Index); 2490 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2491 "induction"); 2492 } 2493 case InductionDescriptor::IK_NoInduction: 2494 return nullptr; 2495 } 2496 llvm_unreachable("invalid enum"); 2497 } 2498 2499 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2500 const VPIteration &Instance, 2501 VPTransformState &State) { 2502 Value *ScalarInst = State.get(Def, Instance); 2503 Value *VectorValue = State.get(Def, Instance.Part); 2504 VectorValue = Builder.CreateInsertElement( 2505 VectorValue, ScalarInst, 2506 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2507 State.set(Def, VectorValue, Instance.Part); 2508 } 2509 2510 // Return whether we allow using masked interleave-groups (for dealing with 2511 // strided loads/stores that reside in predicated blocks, or for dealing 2512 // with gaps). 
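// Illustrative example: a factor-2 load group inside a tail-folded loop needs a
// masked wide load; if the target does not support masked interleaved accesses,
// the group is not widened as an interleave group and the memory accesses are
// handled in some other way (e.g. gather/scatter or scalarization).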
2513 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2514 // If an override option has been passed in for interleaved accesses, use it. 2515 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2516 return EnableMaskedInterleavedMemAccesses; 2517 2518 return TTI.enableMaskedInterleavedAccessVectorization(); 2519 } 2520 2521 // Try to vectorize the interleave group that \p Instr belongs to. 2522 // 2523 // E.g. Translate following interleaved load group (factor = 3): 2524 // for (i = 0; i < N; i+=3) { 2525 // R = Pic[i]; // Member of index 0 2526 // G = Pic[i+1]; // Member of index 1 2527 // B = Pic[i+2]; // Member of index 2 2528 // ... // do something to R, G, B 2529 // } 2530 // To: 2531 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2532 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2533 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2534 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2535 // 2536 // Or translate following interleaved store group (factor = 3): 2537 // for (i = 0; i < N; i+=3) { 2538 // ... do something to R, G, B 2539 // Pic[i] = R; // Member of index 0 2540 // Pic[i+1] = G; // Member of index 1 2541 // Pic[i+2] = B; // Member of index 2 2542 // } 2543 // To: 2544 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2545 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2546 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2547 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2548 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2549 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2550 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2551 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2552 VPValue *BlockInMask) { 2553 Instruction *Instr = Group->getInsertPos(); 2554 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2555 2556 // Prepare for the vector type of the interleaved load/store. 2557 Type *ScalarTy = getLoadStoreType(Instr); 2558 unsigned InterleaveFactor = Group->getFactor(); 2559 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2560 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2561 2562 // Prepare for the new pointers. 2563 SmallVector<Value *, 2> AddrParts; 2564 unsigned Index = Group->getIndex(Instr); 2565 2566 // TODO: extend the masked interleaved-group support to reversed access. 2567 assert((!BlockInMask || !Group->isReverse()) && 2568 "Reversed masked interleave-group not supported."); 2569 2570 // If the group is reverse, adjust the index to refer to the last vector lane 2571 // instead of the first. We adjust the index from the first vector lane, 2572 // rather than directly getting the pointer for lane VF - 1, because the 2573 // pointer operand of the interleaved access is supposed to be uniform. For 2574 // uniform instructions, we're only required to generate a value for the 2575 // first vector lane in each unroll iteration. 2576 if (Group->isReverse()) 2577 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2578 2579 for (unsigned Part = 0; Part < UF; Part++) { 2580 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2581 State.setDebugLocFromInst(AddrPart); 2582 2583 // Notice current instruction could be any index. Need to adjust the address 2584 // to the member of index 0. 2585 // 2586 // E.g. 
a = A[i+1]; // Member of index 1 (Current instruction) 2587 // b = A[i]; // Member of index 0 2588 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2589 // 2590 // E.g. A[i+1] = a; // Member of index 1 2591 // A[i] = b; // Member of index 0 2592 // A[i+2] = c; // Member of index 2 (Current instruction) 2593 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2594 2595 bool InBounds = false; 2596 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2597 InBounds = gep->isInBounds(); 2598 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2599 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2600 2601 // Cast to the vector pointer type. 2602 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2603 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2604 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2605 } 2606 2607 State.setDebugLocFromInst(Instr); 2608 Value *PoisonVec = PoisonValue::get(VecTy); 2609 2610 Value *MaskForGaps = nullptr; 2611 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2612 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2613 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2614 } 2615 2616 // Vectorize the interleaved load group. 2617 if (isa<LoadInst>(Instr)) { 2618 // For each unroll part, create a wide load for the group. 2619 SmallVector<Value *, 2> NewLoads; 2620 for (unsigned Part = 0; Part < UF; Part++) { 2621 Instruction *NewLoad; 2622 if (BlockInMask || MaskForGaps) { 2623 assert(useMaskedInterleavedAccesses(*TTI) && 2624 "masked interleaved groups are not allowed."); 2625 Value *GroupMask = MaskForGaps; 2626 if (BlockInMask) { 2627 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2628 Value *ShuffledMask = Builder.CreateShuffleVector( 2629 BlockInMaskPart, 2630 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2631 "interleaved.mask"); 2632 GroupMask = MaskForGaps 2633 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2634 MaskForGaps) 2635 : ShuffledMask; 2636 } 2637 NewLoad = 2638 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2639 GroupMask, PoisonVec, "wide.masked.vec"); 2640 } 2641 else 2642 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2643 Group->getAlign(), "wide.vec"); 2644 Group->addMetadata(NewLoad); 2645 NewLoads.push_back(NewLoad); 2646 } 2647 2648 // For each member in the group, shuffle out the appropriate data from the 2649 // wide loads. 2650 unsigned J = 0; 2651 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2652 Instruction *Member = Group->getMember(I); 2653 2654 // Skip the gaps in the group. 2655 if (!Member) 2656 continue; 2657 2658 auto StrideMask = 2659 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2660 for (unsigned Part = 0; Part < UF; Part++) { 2661 Value *StridedVec = Builder.CreateShuffleVector( 2662 NewLoads[Part], StrideMask, "strided.vec"); 2663 2664 // If this member has different type, cast the result type. 
2665 if (Member->getType() != ScalarTy) { 2666 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2667 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2668 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2669 } 2670 2671 if (Group->isReverse()) 2672 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2673 2674 State.set(VPDefs[J], StridedVec, Part); 2675 } 2676 ++J; 2677 } 2678 return; 2679 } 2680 2681 // The sub vector type for current instruction. 2682 auto *SubVT = VectorType::get(ScalarTy, VF); 2683 2684 // Vectorize the interleaved store group. 2685 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2686 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2687 "masked interleaved groups are not allowed."); 2688 assert((!MaskForGaps || !VF.isScalable()) && 2689 "masking gaps for scalable vectors is not yet supported."); 2690 for (unsigned Part = 0; Part < UF; Part++) { 2691 // Collect the stored vector from each member. 2692 SmallVector<Value *, 4> StoredVecs; 2693 for (unsigned i = 0; i < InterleaveFactor; i++) { 2694 assert((Group->getMember(i) || MaskForGaps) && 2695 "Fail to get a member from an interleaved store group"); 2696 Instruction *Member = Group->getMember(i); 2697 2698 // Skip the gaps in the group. 2699 if (!Member) { 2700 Value *Undef = PoisonValue::get(SubVT); 2701 StoredVecs.push_back(Undef); 2702 continue; 2703 } 2704 2705 Value *StoredVec = State.get(StoredValues[i], Part); 2706 2707 if (Group->isReverse()) 2708 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2709 2710 // If this member has different type, cast it to a unified type. 2711 2712 if (StoredVec->getType() != SubVT) 2713 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2714 2715 StoredVecs.push_back(StoredVec); 2716 } 2717 2718 // Concatenate all vectors into a wide vector. 2719 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2720 2721 // Interleave the elements in the wide vector. 2722 Value *IVec = Builder.CreateShuffleVector( 2723 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2724 "interleaved.vec"); 2725 2726 Instruction *NewStoreInstr; 2727 if (BlockInMask || MaskForGaps) { 2728 Value *GroupMask = MaskForGaps; 2729 if (BlockInMask) { 2730 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2731 Value *ShuffledMask = Builder.CreateShuffleVector( 2732 BlockInMaskPart, 2733 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2734 "interleaved.mask"); 2735 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2736 ShuffledMask, MaskForGaps) 2737 : ShuffledMask; 2738 } 2739 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2740 Group->getAlign(), GroupMask); 2741 } else 2742 NewStoreInstr = 2743 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2744 2745 Group->addMetadata(NewStoreInstr); 2746 } 2747 } 2748 2749 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2750 VPReplicateRecipe *RepRecipe, 2751 const VPIteration &Instance, 2752 bool IfPredicateInstr, 2753 VPTransformState &State) { 2754 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2755 2756 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2757 // the first lane and part. 2758 if (isa<NoAliasScopeDeclInst>(Instr)) 2759 if (!Instance.isFirstIteration()) 2760 return; 2761 2762 // Does this instruction return a value ? 
2763 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2764 2765 Instruction *Cloned = Instr->clone(); 2766 if (!IsVoidRetTy) 2767 Cloned->setName(Instr->getName() + ".cloned"); 2768 2769 // If the scalarized instruction contributes to the address computation of a 2770 // widen masked load/store which was in a basic block that needed predication 2771 // and is not predicated after vectorization, we can't propagate 2772 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2773 // instruction could feed a poison value to the base address of the widen 2774 // load/store. 2775 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2776 Cloned->dropPoisonGeneratingFlags(); 2777 2778 if (Instr->getDebugLoc()) 2779 State.setDebugLocFromInst(Instr); 2780 2781 // Replace the operands of the cloned instructions with their scalar 2782 // equivalents in the new loop. 2783 for (auto &I : enumerate(RepRecipe->operands())) { 2784 auto InputInstance = Instance; 2785 VPValue *Operand = I.value(); 2786 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2787 if (OperandR && OperandR->isUniform()) 2788 InputInstance.Lane = VPLane::getFirstLane(); 2789 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2790 } 2791 State.addNewMetadata(Cloned, Instr); 2792 2793 // Place the cloned scalar in the new loop. 2794 State.Builder.Insert(Cloned); 2795 2796 State.set(RepRecipe, Cloned, Instance); 2797 2798 // If we just cloned a new assumption, add it the assumption cache. 2799 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2800 AC->registerAssumption(II); 2801 2802 // End if-block. 2803 if (IfPredicateInstr) 2804 PredicatedInstructions.push_back(Cloned); 2805 } 2806 2807 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2808 if (TripCount) 2809 return TripCount; 2810 2811 assert(InsertBlock); 2812 IRBuilder<> Builder(InsertBlock->getTerminator()); 2813 // Find the loop boundaries. 2814 ScalarEvolution *SE = PSE.getSE(); 2815 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2816 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2817 "Invalid loop count"); 2818 2819 Type *IdxTy = Legal->getWidestInductionType(); 2820 assert(IdxTy && "No type for induction"); 2821 2822 // The exit count might have the type of i64 while the phi is i32. This can 2823 // happen if we have an induction variable that is sign extended before the 2824 // compare. The only way that we get a backedge taken count is that the 2825 // induction variable was signed and as such will not overflow. In such a case 2826 // truncation is legal. 2827 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2828 IdxTy->getPrimitiveSizeInBits()) 2829 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2830 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2831 2832 // Get the total trip count from the count by adding 1. 2833 const SCEV *ExitCount = SE->getAddExpr( 2834 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2835 2836 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2837 2838 // Expand the trip count and place the new instructions in the preheader. 2839 // Notice that the pre-header does not change, only the loop body. 2840 SCEVExpander Exp(*SE, DL, "induction"); 2841 2842 // Count holds the overall loop count (N). 
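// Worked example (hypothetical loop, not taken from this code): for a loop
//   for (i32 i = 0; i != n; ++i) { ... }
// that is known to execute n > 0 times, SCEV reports a backedge-taken count of
// n - 1, so the trip count expanded below is (n - 1) + 1 == n. If the widest
// induction type is i64, the backedge-taken count is first zero-extended to
// i64, and the expansion materializes an i64 value in the preheader.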
2843 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2844 InsertBlock->getTerminator()); 2845 2846 if (TripCount->getType()->isPointerTy()) 2847 TripCount = 2848 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2849 InsertBlock->getTerminator()); 2850 2851 return TripCount; 2852 } 2853 2854 Value * 2855 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2856 if (VectorTripCount) 2857 return VectorTripCount; 2858 2859 Value *TC = getOrCreateTripCount(InsertBlock); 2860 IRBuilder<> Builder(InsertBlock->getTerminator()); 2861 2862 Type *Ty = TC->getType(); 2863 // This is where we can make the step a runtime constant. 2864 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2865 2866 // If the tail is to be folded by masking, round the number of iterations N 2867 // up to a multiple of Step instead of rounding down. This is done by first 2868 // adding Step-1 and then rounding down. Note that it's ok if this addition 2869 // overflows: the vector induction variable will eventually wrap to zero given 2870 // that it starts at zero and its Step is a power of two; the loop will then 2871 // exit, with the last early-exit vector comparison also producing all-true. 2872 // For scalable vectors the VF is not guaranteed to be a power of 2, but this 2873 // is accounted for in emitIterationCountCheck that adds an overflow check. 2874 if (Cost->foldTailByMasking()) { 2875 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2876 "VF*UF must be a power of 2 when folding tail by masking"); 2877 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2878 TC = Builder.CreateAdd( 2879 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2880 } 2881 2882 // Now we need to generate the expression for the part of the loop that the 2883 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2884 // iterations are not required for correctness, or N - Step, otherwise. Step 2885 // is equal to the vectorization factor (number of SIMD elements) times the 2886 // unroll factor (number of SIMD instructions). 2887 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2888 2889 // There are cases where we *must* run at least one iteration in the remainder 2890 // loop. See the cost model for when this can happen. If the step evenly 2891 // divides the trip count, we set the remainder to be equal to the step. If 2892 // the step does not evenly divide the trip count, no adjustment is necessary 2893 // since there will already be scalar iterations. Note that the minimum 2894 // iterations check ensures that N >= Step. 2895 if (Cost->requiresScalarEpilogue(VF)) { 2896 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2897 R = Builder.CreateSelect(IsZero, Step, R); 2898 } 2899 2900 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2901 2902 return VectorTripCount; 2903 } 2904 2905 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2906 const DataLayout &DL) { 2907 // Verify that V is a vector type with same number of elements as DstVTy. 
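// Rough sketch of the intent of this helper (illustrative types only,
// assuming 64-bit pointers): <4 x double> cannot be bitcast directly to
// <4 x i8*>, so the conversion goes through an integer vector of the same
// element size:
//   %tmp = bitcast <4 x double> %v to <4 x i64>
//   %res = inttoptr <4 x i64> %tmp to <4 x i8*>
// CreateBitOrPointerCast picks the appropriate bitcast/inttoptr/ptrtoint for
// each step.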
2908 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2909 unsigned VF = DstFVTy->getNumElements(); 2910 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2911 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2912 Type *SrcElemTy = SrcVecTy->getElementType(); 2913 Type *DstElemTy = DstFVTy->getElementType(); 2914 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2915 "Vector elements must have same size"); 2916 2917 // Do a direct cast if element types are castable. 2918 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2919 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2920 } 2921 // V cannot be directly casted to desired vector type. 2922 // May happen when V is a floating point vector but DstVTy is a vector of 2923 // pointers or vice-versa. Handle this using a two-step bitcast using an 2924 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2925 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2926 "Only one type should be a pointer type"); 2927 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2928 "Only one type should be a floating point type"); 2929 Type *IntTy = 2930 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2931 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2932 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2933 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2934 } 2935 2936 void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) { 2937 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2938 // Reuse existing vector loop preheader for TC checks. 2939 // Note that new preheader block is generated for vector loop. 2940 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2941 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2942 2943 // Generate code to check if the loop's trip count is less than VF * UF, or 2944 // equal to it in case a scalar epilogue is required; this implies that the 2945 // vector trip count is zero. This check also covers the case where adding one 2946 // to the backedge-taken count overflowed leading to an incorrect trip count 2947 // of zero. In this case we will also jump to the scalar loop. 2948 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2949 : ICmpInst::ICMP_ULT; 2950 2951 // If tail is to be folded, vector loop takes care of all iterations. 2952 Type *CountTy = Count->getType(); 2953 Value *CheckMinIters = Builder.getFalse(); 2954 auto CreateStep = [&]() -> Value * { 2955 // Create step with max(MinProTripCount, UF * VF). 2956 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue()) 2957 return createStepForVF(Builder, CountTy, VF, UF); 2958 2959 Value *MinProfTC = 2960 createStepForVF(Builder, CountTy, MinProfitableTripCount, 1); 2961 if (!VF.isScalable()) 2962 return MinProfTC; 2963 return Builder.CreateBinaryIntrinsic( 2964 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF)); 2965 }; 2966 2967 if (!Cost->foldTailByMasking()) 2968 CheckMinIters = 2969 Builder.CreateICmp(P, Count, CreateStep(), "min.iters.check"); 2970 else if (VF.isScalable()) { 2971 // vscale is not necessarily a power-of-2, which means we cannot guarantee 2972 // an overflow to zero when updating induction variables and so an 2973 // additional overflow check is required before entering the vector loop. 2974 2975 // Get the maximum unsigned value for the type. 
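// (Aside: the comparison built below, (UMax - n) < VF * UF, guarantees that
// whenever the vector loop is entered, n + (VF * UF - 1), which is used when
// rounding the trip count up for tail folding, cannot wrap. Illustrative i8
// example: UMax = 255, n = 253, VF * UF = 8 gives 255 - 253 = 2 < 8, so the
// vector loop is bypassed.)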
2976 Value *MaxUIntTripCount = 2977 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask()); 2978 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count); 2979 2980 // Don't execute the vector loop if (UMax - n) < (VF * UF). 2981 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep()); 2982 } 2983 2984 // Create new preheader for vector loop. 2985 LoopVectorPreHeader = 2986 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2987 "vector.ph"); 2988 2989 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2990 DT->getNode(Bypass)->getIDom()) && 2991 "TC check is expected to dominate Bypass"); 2992 2993 // Update dominator for Bypass & LoopExit (if needed). 2994 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2995 if (!Cost->requiresScalarEpilogue(VF)) 2996 // If there is an epilogue which must run, there's no edge from the 2997 // middle block to exit blocks and thus no need to update the immediate 2998 // dominator of the exit blocks. 2999 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3000 3001 ReplaceInstWithInst( 3002 TCCheckBlock->getTerminator(), 3003 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3004 LoopBypassBlocks.push_back(TCCheckBlock); 3005 } 3006 3007 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3008 BasicBlock *const SCEVCheckBlock = 3009 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3010 if (!SCEVCheckBlock) 3011 return nullptr; 3012 3013 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3014 (OptForSizeBasedOnProfile && 3015 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3016 "Cannot SCEV check stride or overflow when optimizing for size"); 3017 3018 3019 // Update dominator only if this is first RT check. 3020 if (LoopBypassBlocks.empty()) { 3021 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3022 if (!Cost->requiresScalarEpilogue(VF)) 3023 // If there is an epilogue which must run, there's no edge from the 3024 // middle block to exit blocks and thus no need to update the immediate 3025 // dominator of the exit blocks. 3026 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3027 } 3028 3029 LoopBypassBlocks.push_back(SCEVCheckBlock); 3030 AddedSafetyChecks = true; 3031 return SCEVCheckBlock; 3032 } 3033 3034 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) { 3035 // VPlan-native path does not do any analysis for runtime checks currently. 3036 if (EnableVPlanNativePath) 3037 return nullptr; 3038 3039 BasicBlock *const MemCheckBlock = 3040 RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader); 3041 3042 // Check if we generated code that checks in runtime if arrays overlap. We put 3043 // the checks into a separate block to make the more common case of few 3044 // elements faster. 
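// A typical emitted check (illustrative, hypothetical values): for a loop
// copying n i32 elements from %b to %a, the runtime test is, conceptually,
//   %no.overlap = (%a + 4 * n <= %b) || (%b + 4 * n <= %a)
// and execution falls back to the scalar loop when it fails.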
3045 if (!MemCheckBlock) 3046 return nullptr; 3047 3048 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3049 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3050 "Cannot emit memory checks when optimizing for size, unless forced " 3051 "to vectorize."); 3052 ORE->emit([&]() { 3053 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3054 OrigLoop->getStartLoc(), 3055 OrigLoop->getHeader()) 3056 << "Code-size may be reduced by not forcing " 3057 "vectorization, or by source-code modifications " 3058 "eliminating the need for runtime checks " 3059 "(e.g., adding 'restrict')."; 3060 }); 3061 } 3062 3063 LoopBypassBlocks.push_back(MemCheckBlock); 3064 3065 AddedSafetyChecks = true; 3066 3067 return MemCheckBlock; 3068 } 3069 3070 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3071 LoopScalarBody = OrigLoop->getHeader(); 3072 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3073 assert(LoopVectorPreHeader && "Invalid loop structure"); 3074 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3075 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3076 "multiple exit loop without required epilogue?"); 3077 3078 LoopMiddleBlock = 3079 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3080 LI, nullptr, Twine(Prefix) + "middle.block"); 3081 LoopScalarPreHeader = 3082 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3083 nullptr, Twine(Prefix) + "scalar.ph"); 3084 3085 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3086 3087 // Set up the middle block terminator. Two cases: 3088 // 1) If we know that we must execute the scalar epilogue, emit an 3089 // unconditional branch. 3090 // 2) Otherwise, we must have a single unique exit block (due to how we 3091 // implement the multiple exit case). In this case, set up a conditonal 3092 // branch from the middle block to the loop scalar preheader, and the 3093 // exit block. completeLoopSkeleton will update the condition to use an 3094 // iteration check, if required to decide whether to execute the remainder. 3095 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 3096 BranchInst::Create(LoopScalarPreHeader) : 3097 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3098 Builder.getTrue()); 3099 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3100 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3101 3102 // Update dominator for loop exit. During skeleton creation, only the vector 3103 // pre-header and the middle block are created. The vector loop is entirely 3104 // created during VPlan exection. 3105 if (!Cost->requiresScalarEpilogue(VF)) 3106 // If there is an epilogue which must run, there's no edge from the 3107 // middle block to exit blocks and thus no need to update the immediate 3108 // dominator of the exit blocks. 3109 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3110 } 3111 3112 void InnerLoopVectorizer::createInductionResumeValues( 3113 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3114 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3115 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3116 "Inconsistent information about additional bypass."); 3117 3118 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3119 assert(VectorTripCount && "Expected valid arguments"); 3120 // We are going to resume the execution of the scalar loop. 
3121 // Go over all of the induction variables that we found and fix the 3122 // PHIs that are left in the scalar version of the loop. 3123 // The starting values of PHI nodes depend on the counter of the last 3124 // iteration in the vectorized loop. 3125 // If we come from a bypass edge then we need to start from the original 3126 // start value. 3127 Instruction *OldInduction = Legal->getPrimaryInduction(); 3128 for (auto &InductionEntry : Legal->getInductionVars()) { 3129 PHINode *OrigPhi = InductionEntry.first; 3130 InductionDescriptor II = InductionEntry.second; 3131 3132 Value *&EndValue = IVEndValues[OrigPhi]; 3133 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3134 if (OrigPhi == OldInduction) { 3135 // We know what the end value is. 3136 EndValue = VectorTripCount; 3137 } else { 3138 IRBuilder<> B(LoopVectorPreHeader->getTerminator()); 3139 3140 // Fast-math-flags propagate from the original induction instruction. 3141 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3142 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3143 3144 Type *StepType = II.getStep()->getType(); 3145 Instruction::CastOps CastOp = 3146 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3147 Value *VTC = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.vtc"); 3148 Value *Step = 3149 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3150 EndValue = emitTransformedIndex(B, VTC, II.getStartValue(), Step, II); 3151 EndValue->setName("ind.end"); 3152 3153 // Compute the end value for the additional bypass (if applicable). 3154 if (AdditionalBypass.first) { 3155 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3156 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3157 StepType, true); 3158 Value *Step = 3159 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3160 VTC = 3161 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.vtc"); 3162 EndValueFromAdditionalBypass = 3163 emitTransformedIndex(B, VTC, II.getStartValue(), Step, II); 3164 EndValueFromAdditionalBypass->setName("ind.end"); 3165 } 3166 } 3167 3168 // Create phi nodes to merge from the backedge-taken check block. 3169 PHINode *BCResumeVal = 3170 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3171 LoopScalarPreHeader->getTerminator()); 3172 // Copy original phi DL over to the new one. 3173 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3174 3175 // The new PHI merges the original incoming value, in case of a bypass, 3176 // or the value at the end of the vectorized loop. 3177 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3178 3179 // Fix the scalar body counter (PHI node). 3180 // The old induction's phi node in the scalar body needs the truncated 3181 // value. 3182 for (BasicBlock *BB : LoopBypassBlocks) 3183 BCResumeVal->addIncoming(II.getStartValue(), BB); 3184 3185 if (AdditionalBypass.first) 3186 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3187 EndValueFromAdditionalBypass); 3188 3189 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3190 } 3191 } 3192 3193 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3194 // The trip counts should be cached by now. 
3195 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3196 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3197 3198 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3199 3200 // Add a check in the middle block to see if we have completed 3201 // all of the iterations in the first vector loop. Three cases: 3202 // 1) If we require a scalar epilogue, there is no conditional branch as 3203 // we unconditionally branch to the scalar preheader. Do nothing. 3204 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3205 // Thus if tail is to be folded, we know we don't need to run the 3206 // remainder and we can use the previous value for the condition (true). 3207 // 3) Otherwise, construct a runtime check. 3208 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3209 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3210 Count, VectorTripCount, "cmp.n", 3211 LoopMiddleBlock->getTerminator()); 3212 3213 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3214 // of the corresponding compare because they may have ended up with 3215 // different line numbers and we want to avoid awkward line stepping while 3216 // debugging. Eg. if the compare has got a line number inside the loop. 3217 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3218 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3219 } 3220 3221 #ifdef EXPENSIVE_CHECKS 3222 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3223 #endif 3224 3225 return LoopVectorPreHeader; 3226 } 3227 3228 std::pair<BasicBlock *, Value *> 3229 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3230 /* 3231 In this function we generate a new loop. The new loop will contain 3232 the vectorized instructions while the old loop will continue to run the 3233 scalar remainder. 3234 3235 [ ] <-- loop iteration number check. 3236 / | 3237 / v 3238 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3239 | / | 3240 | / v 3241 || [ ] <-- vector pre header. 3242 |/ | 3243 | v 3244 | [ ] \ 3245 | [ ]_| <-- vector loop (created during VPlan execution). 3246 | | 3247 | v 3248 \ -[ ] <--- middle-block. 3249 \/ | 3250 /\ v 3251 | ->[ ] <--- new preheader. 3252 | | 3253 (opt) v <-- edge from middle to exit iff epilogue is not required. 3254 | [ ] \ 3255 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3256 \ | 3257 \ v 3258 >[ ] <-- exit block(s). 3259 ... 3260 */ 3261 3262 // Get the metadata of the original loop before it gets modified. 3263 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3264 3265 // Workaround! Compute the trip count of the original loop and cache it 3266 // before we start modifying the CFG. This code has a systemic problem 3267 // wherein it tries to run analysis over partially constructed IR; this is 3268 // wrong, and not simply for SCEV. The trip count of the original loop 3269 // simply happens to be prone to hitting this in practice. In theory, we 3270 // can hit the same issue for any SCEV, or ValueTracking query done during 3271 // mutation. See PR49900. 3272 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3273 3274 // Create an empty vector loop, and prepare basic blocks for the runtime 3275 // checks. 3276 createVectorLoopSkeleton(""); 3277 3278 // Now, compare the new count to zero. If it is zero skip the vector loop and 3279 // jump to the scalar loop. 
This check also covers the case where the 3280 // backedge-taken count is uint##_max: adding one to it will overflow leading 3281 // to an incorrect trip count of zero. In this (rare) case we will also jump 3282 // to the scalar loop. 3283 emitIterationCountCheck(LoopScalarPreHeader); 3284 3285 // Generate the code to check any assumptions that we've made for SCEV 3286 // expressions. 3287 emitSCEVChecks(LoopScalarPreHeader); 3288 3289 // Generate the code that checks in runtime if arrays overlap. We put the 3290 // checks into a separate block to make the more common case of few elements 3291 // faster. 3292 emitMemRuntimeChecks(LoopScalarPreHeader); 3293 3294 // Emit phis for the new starting index of the scalar loop. 3295 createInductionResumeValues(); 3296 3297 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3298 } 3299 3300 // Fix up external users of the induction variable. At this point, we are 3301 // in LCSSA form, with all external PHIs that use the IV having one input value, 3302 // coming from the remainder loop. We need those PHIs to also have a correct 3303 // value for the IV when arriving directly from the middle block. 3304 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3305 const InductionDescriptor &II, 3306 Value *VectorTripCount, Value *EndValue, 3307 BasicBlock *MiddleBlock, 3308 BasicBlock *VectorHeader, VPlan &Plan) { 3309 // There are two kinds of external IV usages - those that use the value 3310 // computed in the last iteration (the PHI) and those that use the penultimate 3311 // value (the value that feeds into the phi from the loop latch). 3312 // We allow both, but they, obviously, have different values. 3313 3314 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3315 3316 DenseMap<Value *, Value *> MissingVals; 3317 3318 // An external user of the last iteration's value should see the value that 3319 // the remainder loop uses to initialize its own IV. 3320 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3321 for (User *U : PostInc->users()) { 3322 Instruction *UI = cast<Instruction>(U); 3323 if (!OrigLoop->contains(UI)) { 3324 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3325 MissingVals[UI] = EndValue; 3326 } 3327 } 3328 3329 // An external user of the penultimate value need to see EndValue - Step. 3330 // The simplest way to get this is to recompute it from the constituent SCEVs, 3331 // that is Start + (Step * (CRD - 1)). 3332 for (User *U : OrigPhi->users()) { 3333 auto *UI = cast<Instruction>(U); 3334 if (!OrigLoop->contains(UI)) { 3335 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3336 3337 IRBuilder<> B(MiddleBlock->getTerminator()); 3338 3339 // Fast-math-flags propagate from the original induction instruction. 3340 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3341 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3342 3343 Value *CountMinusOne = B.CreateSub( 3344 VectorTripCount, ConstantInt::get(VectorTripCount->getType(), 1)); 3345 Value *CMO = 3346 !II.getStep()->getType()->isIntegerTy() 3347 ? 
B.CreateCast(Instruction::SIToFP, CountMinusOne, 3348 II.getStep()->getType()) 3349 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3350 CMO->setName("cast.cmo"); 3351 3352 Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(), 3353 VectorHeader->getTerminator()); 3354 Value *Escape = 3355 emitTransformedIndex(B, CMO, II.getStartValue(), Step, II); 3356 Escape->setName("ind.escape"); 3357 MissingVals[UI] = Escape; 3358 } 3359 } 3360 3361 for (auto &I : MissingVals) { 3362 PHINode *PHI = cast<PHINode>(I.first); 3363 // One corner case we have to handle is two IVs "chasing" each-other, 3364 // that is %IV2 = phi [...], [ %IV1, %latch ] 3365 // In this case, if IV1 has an external use, we need to avoid adding both 3366 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3367 // don't already have an incoming value for the middle block. 3368 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) { 3369 PHI->addIncoming(I.second, MiddleBlock); 3370 Plan.removeLiveOut(PHI); 3371 } 3372 } 3373 } 3374 3375 namespace { 3376 3377 struct CSEDenseMapInfo { 3378 static bool canHandle(const Instruction *I) { 3379 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3380 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3381 } 3382 3383 static inline Instruction *getEmptyKey() { 3384 return DenseMapInfo<Instruction *>::getEmptyKey(); 3385 } 3386 3387 static inline Instruction *getTombstoneKey() { 3388 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3389 } 3390 3391 static unsigned getHashValue(const Instruction *I) { 3392 assert(canHandle(I) && "Unknown instruction!"); 3393 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3394 I->value_op_end())); 3395 } 3396 3397 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3398 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3399 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3400 return LHS == RHS; 3401 return LHS->isIdenticalTo(RHS); 3402 } 3403 }; 3404 3405 } // end anonymous namespace 3406 3407 ///Perform cse of induction variable instructions. 3408 static void cse(BasicBlock *BB) { 3409 // Perform simple cse. 3410 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3411 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3412 if (!CSEDenseMapInfo::canHandle(&In)) 3413 continue; 3414 3415 // Check if we can replace this instruction with any of the 3416 // visited instructions. 3417 if (Instruction *V = CSEMap.lookup(&In)) { 3418 In.replaceAllUsesWith(V); 3419 In.eraseFromParent(); 3420 continue; 3421 } 3422 3423 CSEMap[&In] = &In; 3424 } 3425 } 3426 3427 InstructionCost 3428 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3429 bool &NeedToScalarize) const { 3430 Function *F = CI->getCalledFunction(); 3431 Type *ScalarRetTy = CI->getType(); 3432 SmallVector<Type *, 4> Tys, ScalarTys; 3433 for (auto &ArgOp : CI->args()) 3434 ScalarTys.push_back(ArgOp->getType()); 3435 3436 // Estimate cost of scalarized vector call. The source operands are assumed 3437 // to be vectors, so we need to extract individual elements from there, 3438 // execute VF scalar calls, and then gather the result into the vector return 3439 // value. 3440 InstructionCost ScalarCallCost = 3441 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3442 if (VF.isScalar()) 3443 return ScalarCallCost; 3444 3445 // Compute corresponding vector type for return value and arguments. 
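// Illustrative cost comparison (made-up numbers): with VF = 4, a scalar call
// cost of 10 and a scalarization overhead of 6, the scalarized estimate below
// is 4 * 10 + 6 = 46. If VFDatabase provides a vector variant whose call cost
// is 20, that is cheaper, so NeedToScalarize is cleared and 20 is returned;
// otherwise the scalarized estimate stands.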
3446 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3447 for (Type *ScalarTy : ScalarTys) 3448 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3449 3450 // Compute costs of unpacking argument values for the scalar calls and 3451 // packing the return values to a vector. 3452 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3453 3454 InstructionCost Cost = 3455 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3456 3457 // If we can't emit a vector call for this function, then the currently found 3458 // cost is the cost we need to return. 3459 NeedToScalarize = true; 3460 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3461 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3462 3463 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3464 return Cost; 3465 3466 // If the corresponding vector cost is cheaper, return its cost. 3467 InstructionCost VectorCallCost = 3468 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3469 if (VectorCallCost < Cost) { 3470 NeedToScalarize = false; 3471 Cost = VectorCallCost; 3472 } 3473 return Cost; 3474 } 3475 3476 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3477 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3478 return Elt; 3479 return VectorType::get(Elt, VF); 3480 } 3481 3482 InstructionCost 3483 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3484 ElementCount VF) const { 3485 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3486 assert(ID && "Expected intrinsic call!"); 3487 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3488 FastMathFlags FMF; 3489 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3490 FMF = FPMO->getFastMathFlags(); 3491 3492 SmallVector<const Value *> Arguments(CI->args()); 3493 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3494 SmallVector<Type *> ParamTys; 3495 std::transform(FTy->param_begin(), FTy->param_end(), 3496 std::back_inserter(ParamTys), 3497 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3498 3499 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3500 dyn_cast<IntrinsicInst>(CI)); 3501 return TTI.getIntrinsicInstrCost(CostAttrs, 3502 TargetTransformInfo::TCK_RecipThroughput); 3503 } 3504 3505 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3506 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3507 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3508 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3509 } 3510 3511 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3512 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3513 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3514 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3515 } 3516 3517 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3518 // For every instruction `I` in MinBWs, truncate the operands, create a 3519 // truncated version of `I` and reextend its result. InstCombine runs 3520 // later and will remove any ext/trunc pairs. 3521 SmallPtrSet<Value *, 4> Erased; 3522 for (const auto &KV : Cost->getMinimalBitwidths()) { 3523 // If the value wasn't vectorized, we must maintain the original scalar 3524 // type. The absence of the value from State indicates that it 3525 // wasn't vectorized. 3526 // FIXME: Should not rely on getVPValue at this point. 
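// For example (illustrative IR, assuming MinBWs says an i32 add only needs 8
// bits): a widened  %add = add <4 x i32> %a, %b  is rewritten roughly as
//   %a.tr   = trunc <4 x i32> %a to <4 x i8>
//   %b.tr   = trunc <4 x i32> %b to <4 x i8>
//   %add.tr = add <4 x i8> %a.tr, %b.tr
//   %res    = zext <4 x i8> %add.tr to <4 x i32>
// and InstCombine is expected to remove any ext/trunc pairs that turn out to
// be redundant.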
3527 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3528 if (!State.hasAnyVectorValue(Def)) 3529 continue; 3530 for (unsigned Part = 0; Part < UF; ++Part) { 3531 Value *I = State.get(Def, Part); 3532 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3533 continue; 3534 Type *OriginalTy = I->getType(); 3535 Type *ScalarTruncatedTy = 3536 IntegerType::get(OriginalTy->getContext(), KV.second); 3537 auto *TruncatedTy = VectorType::get( 3538 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3539 if (TruncatedTy == OriginalTy) 3540 continue; 3541 3542 IRBuilder<> B(cast<Instruction>(I)); 3543 auto ShrinkOperand = [&](Value *V) -> Value * { 3544 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3545 if (ZI->getSrcTy() == TruncatedTy) 3546 return ZI->getOperand(0); 3547 return B.CreateZExtOrTrunc(V, TruncatedTy); 3548 }; 3549 3550 // The actual instruction modification depends on the instruction type, 3551 // unfortunately. 3552 Value *NewI = nullptr; 3553 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3554 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3555 ShrinkOperand(BO->getOperand(1))); 3556 3557 // Any wrapping introduced by shrinking this operation shouldn't be 3558 // considered undefined behavior. So, we can't unconditionally copy 3559 // arithmetic wrapping flags to NewI. 3560 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3561 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3562 NewI = 3563 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3564 ShrinkOperand(CI->getOperand(1))); 3565 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3566 NewI = B.CreateSelect(SI->getCondition(), 3567 ShrinkOperand(SI->getTrueValue()), 3568 ShrinkOperand(SI->getFalseValue())); 3569 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3570 switch (CI->getOpcode()) { 3571 default: 3572 llvm_unreachable("Unhandled cast!"); 3573 case Instruction::Trunc: 3574 NewI = ShrinkOperand(CI->getOperand(0)); 3575 break; 3576 case Instruction::SExt: 3577 NewI = B.CreateSExtOrTrunc( 3578 CI->getOperand(0), 3579 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3580 break; 3581 case Instruction::ZExt: 3582 NewI = B.CreateZExtOrTrunc( 3583 CI->getOperand(0), 3584 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3585 break; 3586 } 3587 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3588 auto Elements0 = 3589 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3590 auto *O0 = B.CreateZExtOrTrunc( 3591 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3592 auto Elements1 = 3593 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3594 auto *O1 = B.CreateZExtOrTrunc( 3595 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3596 3597 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3598 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3599 // Don't do anything with the operands, just extend the result. 
3600 continue; 3601 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3602 auto Elements = 3603 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3604 auto *O0 = B.CreateZExtOrTrunc( 3605 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3606 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3607 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3608 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3609 auto Elements = 3610 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3611 auto *O0 = B.CreateZExtOrTrunc( 3612 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3613 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3614 } else { 3615 // If we don't know what to do, be conservative and don't do anything. 3616 continue; 3617 } 3618 3619 // Lastly, extend the result. 3620 NewI->takeName(cast<Instruction>(I)); 3621 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3622 I->replaceAllUsesWith(Res); 3623 cast<Instruction>(I)->eraseFromParent(); 3624 Erased.insert(I); 3625 State.reset(Def, Res, Part); 3626 } 3627 } 3628 3629 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3630 for (const auto &KV : Cost->getMinimalBitwidths()) { 3631 // If the value wasn't vectorized, we must maintain the original scalar 3632 // type. The absence of the value from State indicates that it 3633 // wasn't vectorized. 3634 // FIXME: Should not rely on getVPValue at this point. 3635 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3636 if (!State.hasAnyVectorValue(Def)) 3637 continue; 3638 for (unsigned Part = 0; Part < UF; ++Part) { 3639 Value *I = State.get(Def, Part); 3640 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3641 if (Inst && Inst->use_empty()) { 3642 Value *NewI = Inst->getOperand(0); 3643 Inst->eraseFromParent(); 3644 State.reset(Def, NewI, Part); 3645 } 3646 } 3647 } 3648 } 3649 3650 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State, 3651 VPlan &Plan) { 3652 // Insert truncates and extends for any truncated instructions as hints to 3653 // InstCombine. 3654 if (VF.isVector()) 3655 truncateToMinimalBitwidths(State); 3656 3657 // Fix widened non-induction PHIs by setting up the PHI operands. 3658 if (EnableVPlanNativePath) 3659 fixNonInductionPHIs(Plan, State); 3660 3661 // At this point every instruction in the original loop is widened to a 3662 // vector form. Now we need to fix the recurrences in the loop. These PHI 3663 // nodes are currently empty because we did not want to introduce cycles. 3664 // This is the second stage of vectorizing recurrences. 3665 fixCrossIterationPHIs(State); 3666 3667 // Forget the original basic block. 3668 PSE.getSE()->forgetLoop(OrigLoop); 3669 3670 VPBasicBlock *LatchVPBB = Plan.getVectorLoopRegion()->getExitingBasicBlock(); 3671 Loop *VectorLoop = LI->getLoopFor(State.CFG.VPBB2IRBB[LatchVPBB]); 3672 if (Cost->requiresScalarEpilogue(VF)) { 3673 // No edge from the middle block to the unique exit block has been inserted 3674 // and there is nothing to fix from vector loop; phis should have incoming 3675 // from scalar loop only. 3676 Plan.clearLiveOuts(); 3677 } else { 3678 // If we inserted an edge from the middle block to the unique exit block, 3679 // update uses outside the loop (phis) to account for the newly inserted 3680 // edge. 3681 3682 // Fix-up external users of the induction variables. 
3683 for (auto &Entry : Legal->getInductionVars()) 3684 fixupIVUsers(Entry.first, Entry.second, 3685 getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()), 3686 IVEndValues[Entry.first], LoopMiddleBlock, 3687 VectorLoop->getHeader(), Plan); 3688 } 3689 3690 // Fix LCSSA phis not already fixed earlier. Extracts may need to be generated 3691 // in the exit block, so update the builder. 3692 State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHI()); 3693 for (auto &KV : Plan.getLiveOuts()) 3694 KV.second->fixPhi(Plan, State); 3695 3696 for (Instruction *PI : PredicatedInstructions) 3697 sinkScalarOperands(&*PI); 3698 3699 // Remove redundant induction instructions. 3700 cse(VectorLoop->getHeader()); 3701 3702 // Set/update profile weights for the vector and remainder loops as original 3703 // loop iterations are now distributed among them. Note that original loop 3704 // represented by LoopScalarBody becomes remainder loop after vectorization. 3705 // 3706 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 3707 // end up getting slightly roughened result but that should be OK since 3708 // profile is not inherently precise anyway. Note also possible bypass of 3709 // vector code caused by legality checks is ignored, assigning all the weight 3710 // to the vector loop, optimistically. 3711 // 3712 // For scalable vectorization we can't know at compile time how many iterations 3713 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3714 // vscale of '1'. 3715 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop, 3716 LI->getLoopFor(LoopScalarBody), 3717 VF.getKnownMinValue() * UF); 3718 } 3719 3720 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3721 // In order to support recurrences we need to be able to vectorize Phi nodes. 3722 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3723 // stage #2: We now need to fix the recurrences by adding incoming edges to 3724 // the currently empty PHI nodes. At this point every instruction in the 3725 // original loop is widened to a vector form so we can use them to construct 3726 // the incoming edges. 3727 VPBasicBlock *Header = 3728 State.Plan->getVectorLoopRegion()->getEntryBasicBlock(); 3729 for (VPRecipeBase &R : Header->phis()) { 3730 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3731 fixReduction(ReductionPhi, State); 3732 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3733 fixFirstOrderRecurrence(FOR, State); 3734 } 3735 } 3736 3737 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3738 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 3739 // This is the second phase of vectorizing first-order recurrences. An 3740 // overview of the transformation is described below. Suppose we have the 3741 // following loop. 3742 // 3743 // for (int i = 0; i < n; ++i) 3744 // b[i] = a[i] - a[i - 1]; 3745 // 3746 // There is a first-order recurrence on "a". For this loop, the shorthand 3747 // scalar IR looks like: 3748 // 3749 // scalar.ph: 3750 // s_init = a[-1] 3751 // br scalar.body 3752 // 3753 // scalar.body: 3754 // i = phi [0, scalar.ph], [i+1, scalar.body] 3755 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3756 // s2 = a[i] 3757 // b[i] = s2 - s1 3758 // br cond, scalar.body, ... 3759 // 3760 // In this example, s1 is a recurrence because it's value depends on the 3761 // previous iteration. 
In the first phase of vectorization, we created a 3762 // vector phi v1 for s1. We now complete the vectorization and produce the 3763 // shorthand vector IR shown below (for VF = 4, UF = 1). 3764 // 3765 // vector.ph: 3766 // v_init = vector(..., ..., ..., a[-1]) 3767 // br vector.body 3768 // 3769 // vector.body 3770 // i = phi [0, vector.ph], [i+4, vector.body] 3771 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3772 // v2 = a[i, i+1, i+2, i+3]; 3773 // v3 = vector(v1(3), v2(0, 1, 2)) 3774 // b[i, i+1, i+2, i+3] = v2 - v3 3775 // br cond, vector.body, middle.block 3776 // 3777 // middle.block: 3778 // x = v2(3) 3779 // br scalar.ph 3780 // 3781 // scalar.ph: 3782 // s_init = phi [x, middle.block], [a[-1], otherwise] 3783 // br scalar.body 3784 // 3785 // After execution completes the vector loop, we extract the next value of 3786 // the recurrence (x) to use as the initial value in the scalar loop. 3787 3788 // Extract the last vector element in the middle block. This will be the 3789 // initial value for the recurrence when jumping to the scalar loop. 3790 VPValue *PreviousDef = PhiR->getBackedgeValue(); 3791 Value *Incoming = State.get(PreviousDef, UF - 1); 3792 auto *ExtractForScalar = Incoming; 3793 auto *IdxTy = Builder.getInt32Ty(); 3794 if (VF.isVector()) { 3795 auto *One = ConstantInt::get(IdxTy, 1); 3796 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3797 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3798 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 3799 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 3800 "vector.recur.extract"); 3801 } 3802 // Extract the second last element in the middle block if the 3803 // Phi is used outside the loop. We need to extract the phi itself 3804 // and not the last element (the phi update in the current iteration). This 3805 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3806 // when the scalar loop is not run at all. 3807 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3808 if (VF.isVector()) { 3809 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3810 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 3811 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3812 Incoming, Idx, "vector.recur.extract.for.phi"); 3813 } else if (UF > 1) 3814 // When loop is unrolled without vectorizing, initialize 3815 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 3816 // of `Incoming`. This is analogous to the vectorized case above: extracting 3817 // the second last element when VF > 1. 3818 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 3819 3820 // Fix the initial value of the original recurrence in the scalar loop. 3821 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3822 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 3823 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3824 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 3825 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3826 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3827 Start->addIncoming(Incoming, BB); 3828 } 3829 3830 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 3831 Phi->setName("scalar.recur"); 3832 3833 // Finally, fix users of the recurrence outside the loop. The users will need 3834 // either the last value of the scalar recurrence or the last value of the 3835 // vector recurrence we extracted in the middle block. 
Since the loop is in 3836 // LCSSA form, we just need to find all the phi nodes for the original scalar 3837 // recurrence in the exit block, and then add an edge for the middle block. 3838 // Note that LCSSA does not imply single entry when the original scalar loop 3839 // had multiple exiting edges (as we always run the last iteration in the 3840 // scalar epilogue); in that case, there is no edge from middle to exit, 3841 // and thus no phis which need to be updated. 3842 if (!Cost->requiresScalarEpilogue(VF)) 3843 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 3844 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) { 3845 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 3846 State.Plan->removeLiveOut(&LCSSAPhi); 3847 } 3848 } 3849 3850 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, 3851 VPTransformState &State) { 3852 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 3853 // Get its reduction variable descriptor. 3854 assert(Legal->isReductionVariable(OrigPhi) && 3855 "Unable to find the reduction variable"); 3856 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 3857 3858 RecurKind RK = RdxDesc.getRecurrenceKind(); 3859 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 3860 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 3861 State.setDebugLocFromInst(ReductionStartValue); 3862 3863 VPValue *LoopExitInstDef = PhiR->getBackedgeValue(); 3864 // This is the vector-clone of the value that leaves the loop. 3865 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 3866 3867 // Wrap flags are in general invalid after vectorization, clear them. 3868 clearReductionWrapFlags(PhiR, State); 3869 3870 // Before each round, move the insertion point right between 3871 // the PHIs and the values we are going to write. 3872 // This allows us to write both PHINodes and the extractelement 3873 // instructions. 3874 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3875 3876 State.setDebugLocFromInst(LoopExitInst); 3877 3878 Type *PhiTy = OrigPhi->getType(); 3879 3880 VPBasicBlock *LatchVPBB = 3881 PhiR->getParent()->getEnclosingLoopRegion()->getExitingBasicBlock(); 3882 BasicBlock *VectorLoopLatch = State.CFG.VPBB2IRBB[LatchVPBB]; 3883 // If tail is folded by masking, the vector value to leave the loop should be 3884 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3885 // instead of the former. For an inloop reduction the reduction will already 3886 // be predicated, and does not need to be handled here.
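// Sketch of the tail-folded case handled below (hypothetical IR names): the
// vector body computes
//   %rdx.next = add <4 x i32> %rdx.phi, %val
//   %rdx.sel  = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi
// and it is %rdx.sel, not %rdx.next, that must leave the loop and feed the
// final reduction in the middle block, so State is redirected to the select.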
3887 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 3888 for (unsigned Part = 0; Part < UF; ++Part) { 3889 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 3890 SelectInst *Sel = nullptr; 3891 for (User *U : VecLoopExitInst->users()) { 3892 if (isa<SelectInst>(U)) { 3893 assert(!Sel && "Reduction exit feeding two selects"); 3894 Sel = cast<SelectInst>(U); 3895 } else 3896 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3897 } 3898 assert(Sel && "Reduction exit feeds no select"); 3899 State.reset(LoopExitInstDef, Sel, Part); 3900 3901 if (isa<FPMathOperator>(Sel)) 3902 Sel->setFastMathFlags(RdxDesc.getFastMathFlags()); 3903 3904 // If the target can create a predicated operator for the reduction at no 3905 // extra cost in the loop (for example a predicated vadd), it can be 3906 // cheaper for the select to remain in the loop than be sunk out of it, 3907 // and so use the select value for the phi instead of the old 3908 // LoopExitValue. 3909 if (PreferPredicatedReductionSelect || 3910 TTI->preferPredicatedReductionSelect( 3911 RdxDesc.getOpcode(), PhiTy, 3912 TargetTransformInfo::ReductionFlags())) { 3913 auto *VecRdxPhi = 3914 cast<PHINode>(State.get(PhiR, Part)); 3915 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3916 } 3917 } 3918 } 3919 3920 // If the vector reduction can be performed in a smaller type, we truncate 3921 // then extend the loop exit value to enable InstCombine to evaluate the 3922 // entire expression in the smaller type. 3923 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3924 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3925 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3926 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3927 VectorParts RdxParts(UF); 3928 for (unsigned Part = 0; Part < UF; ++Part) { 3929 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3930 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3931 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3932 : Builder.CreateZExt(Trunc, VecTy); 3933 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3934 if (U != Trunc) { 3935 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3936 RdxParts[Part] = Extnd; 3937 } 3938 } 3939 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3940 for (unsigned Part = 0; Part < UF; ++Part) { 3941 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3942 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3943 } 3944 } 3945 3946 // Reduce all of the unrolled parts into a single vector. 3947 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3948 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3949 3950 // The middle block terminator has already been assigned a DebugLoc here (the 3951 // OrigLoop's single latch terminator). We want the whole middle block to 3952 // appear to execute on this line because: (a) it is all compiler generated, 3953 // (b) these instructions are always executed after evaluating the latch 3954 // conditional branch, and (c) other passes may add new predecessors which 3955 // terminate on this line. This is the easiest way to ensure we don't 3956 // accidentally cause an extra step back into the loop while debugging. 3957 State.setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 3958 if (PhiR->isOrdered()) 3959 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 3960 else { 3961 // Floating-point operations should have some FMF to enable the reduction. 
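// Shape of the result for an integer add reduction with UF = 2 and VF = 4
// (illustrative names): the unrolled parts are first combined with the
// reduction opcode,
//   %bin.rdx = add <4 x i32> %part1, %part0
// and, for a non-inloop reduction, collapsed to a scalar further below via
//   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)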
3962 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 3963 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 3964 for (unsigned Part = 1; Part < UF; ++Part) { 3965 Value *RdxPart = State.get(LoopExitInstDef, Part); 3966 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 3967 ReducedPartRdx = Builder.CreateBinOp( 3968 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 3969 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 3970 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 3971 ReducedPartRdx, RdxPart); 3972 else 3973 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 3974 } 3975 } 3976 3977 // Create the reduction after the loop. Note that inloop reductions create the 3978 // target reduction in the loop using a Reduction recipe. 3979 if (VF.isVector() && !PhiR->isInLoop()) { 3980 ReducedPartRdx = 3981 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 3982 // If the reduction can be performed in a smaller type, we need to extend 3983 // the reduction to the wider type before we branch to the original loop. 3984 if (PhiTy != RdxDesc.getRecurrenceType()) 3985 ReducedPartRdx = RdxDesc.isSigned() 3986 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 3987 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 3988 } 3989 3990 PHINode *ResumePhi = 3991 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue()); 3992 3993 // Create a phi node that merges control-flow from the backedge-taken check 3994 // block and the middle block. 3995 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 3996 LoopScalarPreHeader->getTerminator()); 3997 3998 // If we are fixing reductions in the epilogue loop then we should already 3999 // have created a bc.merge.rdx Phi after the main vector body. Ensure that 4000 // we carry over the incoming values correctly. 4001 for (auto *Incoming : predecessors(LoopScalarPreHeader)) { 4002 if (Incoming == LoopMiddleBlock) 4003 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming); 4004 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming)) 4005 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming), 4006 Incoming); 4007 else 4008 BCBlockPhi->addIncoming(ReductionStartValue, Incoming); 4009 } 4010 4011 // Set the resume value for this reduction 4012 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi}); 4013 4014 // If there were stores of the reduction value to a uniform memory address 4015 // inside the loop, create the final store here. 4016 if (StoreInst *SI = RdxDesc.IntermediateStore) { 4017 StoreInst *NewSI = 4018 Builder.CreateStore(ReducedPartRdx, SI->getPointerOperand()); 4019 propagateMetadata(NewSI, SI); 4020 4021 // If the reduction value is used in other places, 4022 // then let the code below create PHI's for that. 4023 } 4024 4025 // Now, we need to fix the users of the reduction variable 4026 // inside and outside of the scalar remainder loop. 4027 4028 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4029 // in the exit blocks. See comment on analogous loop in 4030 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
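// Sketch (hypothetical IR names): an exit-block phi such as
//   %sum.lcssa = phi i32 [ %sum.next, %loop.exiting ]
// gains a second incoming value [ %rdx.final, %middle.block ], so that
// skipping the scalar remainder still yields the reduced value.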
4031 if (!Cost->requiresScalarEpilogue(VF)) 4032 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4033 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) { 4034 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4035 State.Plan->removeLiveOut(&LCSSAPhi); 4036 } 4037 4038 // Fix the scalar loop reduction variable with the incoming reduction sum 4039 // from the vector body and from the backedge value. 4040 int IncomingEdgeBlockIdx = 4041 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4042 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4043 // Pick the other block. 4044 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4045 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4046 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4047 } 4048 4049 void InnerLoopVectorizer::clearReductionWrapFlags(VPReductionPHIRecipe *PhiR, 4050 VPTransformState &State) { 4051 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 4052 RecurKind RK = RdxDesc.getRecurrenceKind(); 4053 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4054 return; 4055 4056 SmallVector<VPValue *, 8> Worklist; 4057 SmallPtrSet<VPValue *, 8> Visited; 4058 Worklist.push_back(PhiR); 4059 Visited.insert(PhiR); 4060 4061 while (!Worklist.empty()) { 4062 VPValue *Cur = Worklist.pop_back_val(); 4063 for (unsigned Part = 0; Part < UF; ++Part) { 4064 Value *V = State.get(Cur, Part); 4065 if (!isa<OverflowingBinaryOperator>(V)) 4066 break; 4067 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4068 } 4069 4070 for (VPUser *U : Cur->users()) { 4071 auto *UserRecipe = dyn_cast<VPRecipeBase>(U); 4072 if (!UserRecipe) 4073 continue; 4074 for (VPValue *V : UserRecipe->definedValues()) 4075 if (Visited.insert(V).second) 4076 Worklist.push_back(V); 4077 } 4078 } 4079 } 4080 4081 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4082 // The basic block and loop containing the predicated instruction. 4083 auto *PredBB = PredInst->getParent(); 4084 auto *VectorLoop = LI->getLoopFor(PredBB); 4085 4086 // Initialize a worklist with the operands of the predicated instruction. 4087 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4088 4089 // Holds instructions that we need to analyze again. An instruction may be 4090 // reanalyzed if we don't yet know if we can sink it or not. 4091 SmallVector<Instruction *, 8> InstsToReanalyze; 4092 4093 // Returns true if a given use occurs in the predicated block. Phi nodes use 4094 // their operands in their corresponding predecessor blocks. 4095 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4096 auto *I = cast<Instruction>(U.getUser()); 4097 BasicBlock *BB = I->getParent(); 4098 if (auto *Phi = dyn_cast<PHINode>(I)) 4099 BB = Phi->getIncomingBlock( 4100 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4101 return BB == PredBB; 4102 }; 4103 4104 // Iteratively sink the scalarized operands of the predicated instruction 4105 // into the block we created for it. When an instruction is sunk, it's 4106 // operands are then added to the worklist. The algorithm ends after one pass 4107 // through the worklist doesn't sink a single instruction. 4108 bool Changed; 4109 do { 4110 // Add the instructions that need to be reanalyzed to the worklist, and 4111 // reset the changed indicator. 
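    // For illustration: a GEP used both by the predicated store (already in
    // PredBB) and by another instruction that has not been sunk yet is
    // deferred to InstsToReanalyze; once that other user is sunk into PredBB
    // on a later pass, the retry succeeds and the GEP is moved as well.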
4112 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4113 InstsToReanalyze.clear(); 4114 Changed = false; 4115 4116 while (!Worklist.empty()) { 4117 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4118 4119 // We can't sink an instruction if it is a phi node, is not in the loop, 4120 // or may have side effects. 4121 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4122 I->mayHaveSideEffects()) 4123 continue; 4124 4125 // If the instruction is already in PredBB, check if we can sink its 4126 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4127 // sinking the scalar instruction I, hence it appears in PredBB; but it 4128 // may have failed to sink I's operands (recursively), which we try 4129 // (again) here. 4130 if (I->getParent() == PredBB) { 4131 Worklist.insert(I->op_begin(), I->op_end()); 4132 continue; 4133 } 4134 4135 // It's legal to sink the instruction if all its uses occur in the 4136 // predicated block. Otherwise, there's nothing to do yet, and we may 4137 // need to reanalyze the instruction. 4138 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4139 InstsToReanalyze.push_back(I); 4140 continue; 4141 } 4142 4143 // Move the instruction to the beginning of the predicated block, and add 4144 // it's operands to the worklist. 4145 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4146 Worklist.insert(I->op_begin(), I->op_end()); 4147 4148 // The sinking may have enabled other instructions to be sunk, so we will 4149 // need to iterate. 4150 Changed = true; 4151 } 4152 } while (Changed); 4153 } 4154 4155 void InnerLoopVectorizer::fixNonInductionPHIs(VPlan &Plan, 4156 VPTransformState &State) { 4157 auto Iter = depth_first( 4158 VPBlockRecursiveTraversalWrapper<VPBlockBase *>(Plan.getEntry())); 4159 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) { 4160 for (VPRecipeBase &P : VPBB->phis()) { 4161 VPWidenPHIRecipe *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P); 4162 if (!VPPhi) 4163 continue; 4164 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4165 // Make sure the builder has a valid insert point. 4166 Builder.SetInsertPoint(NewPhi); 4167 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4168 VPValue *Inc = VPPhi->getIncomingValue(i); 4169 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4170 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4171 } 4172 } 4173 } 4174 } 4175 4176 bool InnerLoopVectorizer::useOrderedReductions( 4177 const RecurrenceDescriptor &RdxDesc) { 4178 return Cost->useOrderedReductions(RdxDesc); 4179 } 4180 4181 void InnerLoopVectorizer::widenCallInstruction(CallInst &CI, VPValue *Def, 4182 VPUser &ArgOperands, 4183 VPTransformState &State) { 4184 assert(!isa<DbgInfoIntrinsic>(CI) && 4185 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4186 State.setDebugLocFromInst(&CI); 4187 4188 SmallVector<Type *, 4> Tys; 4189 for (Value *ArgOperand : CI.args()) 4190 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4191 4192 Intrinsic::ID ID = getVectorIntrinsicIDForCall(&CI, TLI); 4193 4194 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4195 // version of the instruction. 4196 // Is it beneficial to perform intrinsic call compared to lib call? 4197 bool NeedToScalarize = false; 4198 InstructionCost CallCost = Cost->getVectorCallCost(&CI, VF, NeedToScalarize); 4199 InstructionCost IntrinsicCost = 4200 ID ? 
Cost->getVectorIntrinsicCost(&CI, VF) : 0; 4201 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4202 assert((UseVectorIntrinsic || !NeedToScalarize) && 4203 "Instruction should be scalarized elsewhere."); 4204 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4205 "Either the intrinsic cost or vector call cost must be valid"); 4206 4207 for (unsigned Part = 0; Part < UF; ++Part) { 4208 SmallVector<Type *, 2> TysForDecl = {CI.getType()}; 4209 SmallVector<Value *, 4> Args; 4210 for (auto &I : enumerate(ArgOperands.operands())) { 4211 // Some intrinsics have a scalar argument - don't replace it with a 4212 // vector. 4213 Value *Arg; 4214 if (!UseVectorIntrinsic || 4215 !isVectorIntrinsicWithScalarOpAtArg(ID, I.index())) 4216 Arg = State.get(I.value(), Part); 4217 else 4218 Arg = State.get(I.value(), VPIteration(0, 0)); 4219 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I.index())) 4220 TysForDecl.push_back(Arg->getType()); 4221 Args.push_back(Arg); 4222 } 4223 4224 Function *VectorF; 4225 if (UseVectorIntrinsic) { 4226 // Use vector version of the intrinsic. 4227 if (VF.isVector()) 4228 TysForDecl[0] = VectorType::get(CI.getType()->getScalarType(), VF); 4229 Module *M = State.Builder.GetInsertBlock()->getModule(); 4230 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4231 assert(VectorF && "Can't retrieve vector intrinsic."); 4232 } else { 4233 // Use vector version of the function call. 4234 const VFShape Shape = VFShape::get(CI, VF, false /*HasGlobalPred*/); 4235 #ifndef NDEBUG 4236 assert(VFDatabase(CI).getVectorizedFunction(Shape) != nullptr && 4237 "Can't create vector function."); 4238 #endif 4239 VectorF = VFDatabase(CI).getVectorizedFunction(Shape); 4240 } 4241 SmallVector<OperandBundleDef, 1> OpBundles; 4242 CI.getOperandBundlesAsDefs(OpBundles); 4243 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4244 4245 if (isa<FPMathOperator>(V)) 4246 V->copyFastMathFlags(&CI); 4247 4248 State.set(Def, V, Part); 4249 State.addMetadata(V, &CI); 4250 } 4251 } 4252 4253 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4254 // We should not collect Scalars more than once per VF. Right now, this 4255 // function is called from collectUniformsAndScalars(), which already does 4256 // this check. Collecting Scalars for VF=1 does not make any sense. 4257 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4258 "This function should not be visited twice for the same VF"); 4259 4260 // This avoids any chances of creating a REPLICATE recipe during planning 4261 // since that would result in generation of scalarized code during execution, 4262 // which is not supported for scalable vectors. 4263 if (VF.isScalable()) { 4264 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4265 return; 4266 } 4267 4268 SmallSetVector<Instruction *, 8> Worklist; 4269 4270 // These sets are used to seed the analysis with pointers used by memory 4271 // accesses that will remain scalar. 4272 SmallSetVector<Instruction *, 8> ScalarPtrs; 4273 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4274 auto *Latch = TheLoop->getLoopLatch(); 4275 4276 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4277 // The pointer operands of loads and stores will be scalar as long as the 4278 // memory access is not a gather or scatter operation. The value operand of a 4279 // store will remain scalar if the store is scalarized. 
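  // For illustration: in "store i32 %val, i32* %p", the use of %p is scalar
  // unless the store becomes a scatter (CM_GatherScatter), while the use of
  // %val remains scalar only if the store itself is scalarized (CM_Scalarize).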
4280 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4281 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4282 assert(WideningDecision != CM_Unknown && 4283 "Widening decision should be ready at this moment"); 4284 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4285 if (Ptr == Store->getValueOperand()) 4286 return WideningDecision == CM_Scalarize; 4287 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4288 "Ptr is neither a value or pointer operand"); 4289 return WideningDecision != CM_GatherScatter; 4290 }; 4291 4292 // A helper that returns true if the given value is a bitcast or 4293 // getelementptr instruction contained in the loop. 4294 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4295 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4296 isa<GetElementPtrInst>(V)) && 4297 !TheLoop->isLoopInvariant(V); 4298 }; 4299 4300 // A helper that evaluates a memory access's use of a pointer. If the use will 4301 // be a scalar use and the pointer is only used by memory accesses, we place 4302 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4303 // PossibleNonScalarPtrs. 4304 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4305 // We only care about bitcast and getelementptr instructions contained in 4306 // the loop. 4307 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4308 return; 4309 4310 // If the pointer has already been identified as scalar (e.g., if it was 4311 // also identified as uniform), there's nothing to do. 4312 auto *I = cast<Instruction>(Ptr); 4313 if (Worklist.count(I)) 4314 return; 4315 4316 // If the use of the pointer will be a scalar use, and all users of the 4317 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4318 // place the pointer in PossibleNonScalarPtrs. 4319 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4320 return isa<LoadInst>(U) || isa<StoreInst>(U); 4321 })) 4322 ScalarPtrs.insert(I); 4323 else 4324 PossibleNonScalarPtrs.insert(I); 4325 }; 4326 4327 // We seed the scalars analysis with three classes of instructions: (1) 4328 // instructions marked uniform-after-vectorization and (2) bitcast, 4329 // getelementptr and (pointer) phi instructions used by memory accesses 4330 // requiring a scalar use. 4331 // 4332 // (1) Add to the worklist all instructions that have been identified as 4333 // uniform-after-vectorization. 4334 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4335 4336 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4337 // memory accesses requiring a scalar use. The pointer operands of loads and 4338 // stores will be scalar as long as the memory accesses is not a gather or 4339 // scatter operation. The value operand of a store will remain scalar if the 4340 // store is scalarized. 4341 for (auto *BB : TheLoop->blocks()) 4342 for (auto &I : *BB) { 4343 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4344 evaluatePtrUse(Load, Load->getPointerOperand()); 4345 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4346 evaluatePtrUse(Store, Store->getPointerOperand()); 4347 evaluatePtrUse(Store, Store->getValueOperand()); 4348 } 4349 } 4350 for (auto *I : ScalarPtrs) 4351 if (!PossibleNonScalarPtrs.count(I)) { 4352 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4353 Worklist.insert(I); 4354 } 4355 4356 // Insert the forced scalars. 
4357 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector 4358 // induction variable when the PHI user is scalarized. 4359 auto ForcedScalar = ForcedScalars.find(VF); 4360 if (ForcedScalar != ForcedScalars.end()) 4361 for (auto *I : ForcedScalar->second) 4362 Worklist.insert(I); 4363 4364 // Expand the worklist by looking through any bitcasts and getelementptr 4365 // instructions we've already identified as scalar. This is similar to the 4366 // expansion step in collectLoopUniforms(); however, here we're only 4367 // expanding to include additional bitcasts and getelementptr instructions. 4368 unsigned Idx = 0; 4369 while (Idx != Worklist.size()) { 4370 Instruction *Dst = Worklist[Idx++]; 4371 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4372 continue; 4373 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4374 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4375 auto *J = cast<Instruction>(U); 4376 return !TheLoop->contains(J) || Worklist.count(J) || 4377 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4378 isScalarUse(J, Src)); 4379 })) { 4380 Worklist.insert(Src); 4381 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4382 } 4383 } 4384 4385 // An induction variable will remain scalar if all users of the induction 4386 // variable and induction variable update remain scalar. 4387 for (auto &Induction : Legal->getInductionVars()) { 4388 auto *Ind = Induction.first; 4389 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4390 4391 // If tail-folding is applied, the primary induction variable will be used 4392 // to feed a vector compare. 4393 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4394 continue; 4395 4396 // Returns true if \p Indvar is a pointer induction that is used directly by 4397 // load/store instruction \p I. 4398 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4399 Instruction *I) { 4400 return Induction.second.getKind() == 4401 InductionDescriptor::IK_PtrInduction && 4402 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4403 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4404 }; 4405 4406 // Determine if all users of the induction variable are scalar after 4407 // vectorization. 4408 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4409 auto *I = cast<Instruction>(U); 4410 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4411 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4412 }); 4413 if (!ScalarInd) 4414 continue; 4415 4416 // Determine if all users of the induction variable update instruction are 4417 // scalar after vectorization. 4418 auto ScalarIndUpdate = 4419 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4420 auto *I = cast<Instruction>(U); 4421 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4422 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4423 }); 4424 if (!ScalarIndUpdate) 4425 continue; 4426 4427 // The induction variable and its update instruction will remain scalar. 
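    // For illustration: the primary IV of a loop like "for (i = 0; i < n;
    // ++i)" typically stays scalar when its only in-loop users are the IV
    // update, the (already collected) latch compare, and address computations
    // that are themselves scalar; both the IV and its update are then
    // inserted below.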
4428 Worklist.insert(Ind); 4429 Worklist.insert(IndUpdate); 4430 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4431 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4432 << "\n"); 4433 } 4434 4435 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4436 } 4437 4438 bool LoopVectorizationCostModel::isScalarWithPredication( 4439 Instruction *I, ElementCount VF) const { 4440 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4441 return false; 4442 switch(I->getOpcode()) { 4443 default: 4444 break; 4445 case Instruction::Load: 4446 case Instruction::Store: { 4447 if (!Legal->isMaskRequired(I)) 4448 return false; 4449 auto *Ptr = getLoadStorePointerOperand(I); 4450 auto *Ty = getLoadStoreType(I); 4451 Type *VTy = Ty; 4452 if (VF.isVector()) 4453 VTy = VectorType::get(Ty, VF); 4454 const Align Alignment = getLoadStoreAlignment(I); 4455 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4456 TTI.isLegalMaskedGather(VTy, Alignment)) 4457 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4458 TTI.isLegalMaskedScatter(VTy, Alignment)); 4459 } 4460 case Instruction::UDiv: 4461 case Instruction::SDiv: 4462 case Instruction::SRem: 4463 case Instruction::URem: 4464 // TODO: We can use the loop-preheader as context point here and get 4465 // context sensitive reasoning 4466 return !isSafeToSpeculativelyExecute(I); 4467 } 4468 return false; 4469 } 4470 4471 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4472 Instruction *I, ElementCount VF) { 4473 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4474 assert(getWideningDecision(I, VF) == CM_Unknown && 4475 "Decision should not be set yet."); 4476 auto *Group = getInterleavedAccessGroup(I); 4477 assert(Group && "Must have a group."); 4478 4479 // If the instruction's allocated size doesn't equal it's type size, it 4480 // requires padding and will be scalarized. 4481 auto &DL = I->getModule()->getDataLayout(); 4482 auto *ScalarTy = getLoadStoreType(I); 4483 if (hasIrregularType(ScalarTy, DL)) 4484 return false; 4485 4486 // If the group involves a non-integral pointer, we may not be able to 4487 // losslessly cast all values to a common type. 4488 unsigned InterleaveFactor = Group->getFactor(); 4489 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy); 4490 for (unsigned i = 0; i < InterleaveFactor; i++) { 4491 Instruction *Member = Group->getMember(i); 4492 if (!Member) 4493 continue; 4494 auto *MemberTy = getLoadStoreType(Member); 4495 bool MemberNI = DL.isNonIntegralPointerType(MemberTy); 4496 // Don't coerce non-integral pointers to integers or vice versa. 4497 if (MemberNI != ScalarNI) { 4498 // TODO: Consider adding special nullptr value case here 4499 return false; 4500 } else if (MemberNI && ScalarNI && 4501 ScalarTy->getPointerAddressSpace() != 4502 MemberTy->getPointerAddressSpace()) { 4503 return false; 4504 } 4505 } 4506 4507 // Check if masking is required. 4508 // A Group may need masking for one of two reasons: it resides in a block that 4509 // needs predication, or it was decided to use masking to deal with gaps 4510 // (either a gap at the end of a load-access that may result in a speculative 4511 // load, or any gaps in a store-access). 
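  // For illustration: a two-member load group {A[2*i], A[2*i+1]} sitting in a
  // predicated block needs masking when a mask is required for the access,
  // whereas a factor-2 store group that only writes A[2*i] (i.e. has a gap)
  // needs masking regardless of predication.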
4512 bool PredicatedAccessRequiresMasking = 4513 blockNeedsPredicationForAnyReason(I->getParent()) && 4514 Legal->isMaskRequired(I); 4515 bool LoadAccessWithGapsRequiresEpilogMasking = 4516 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4517 !isScalarEpilogueAllowed(); 4518 bool StoreAccessWithGapsRequiresMasking = 4519 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4520 if (!PredicatedAccessRequiresMasking && 4521 !LoadAccessWithGapsRequiresEpilogMasking && 4522 !StoreAccessWithGapsRequiresMasking) 4523 return true; 4524 4525 // If masked interleaving is required, we expect that the user/target had 4526 // enabled it, because otherwise it either wouldn't have been created or 4527 // it should have been invalidated by the CostModel. 4528 assert(useMaskedInterleavedAccesses(TTI) && 4529 "Masked interleave-groups for predicated accesses are not enabled."); 4530 4531 if (Group->isReverse()) 4532 return false; 4533 4534 auto *Ty = getLoadStoreType(I); 4535 const Align Alignment = getLoadStoreAlignment(I); 4536 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4537 : TTI.isLegalMaskedStore(Ty, Alignment); 4538 } 4539 4540 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4541 Instruction *I, ElementCount VF) { 4542 // Get and ensure we have a valid memory instruction. 4543 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4544 4545 auto *Ptr = getLoadStorePointerOperand(I); 4546 auto *ScalarTy = getLoadStoreType(I); 4547 4548 // In order to be widened, the pointer should be consecutive, first of all. 4549 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4550 return false; 4551 4552 // If the instruction is a store located in a predicated block, it will be 4553 // scalarized. 4554 if (isScalarWithPredication(I, VF)) 4555 return false; 4556 4557 // If the instruction's allocated size doesn't equal it's type size, it 4558 // requires padding and will be scalarized. 4559 auto &DL = I->getModule()->getDataLayout(); 4560 if (hasIrregularType(ScalarTy, DL)) 4561 return false; 4562 4563 return true; 4564 } 4565 4566 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4567 // We should not collect Uniforms more than once per VF. Right now, 4568 // this function is called from collectUniformsAndScalars(), which 4569 // already does this check. Collecting Uniforms for VF=1 does not make any 4570 // sense. 4571 4572 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4573 "This function should not be visited twice for the same VF"); 4574 4575 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4576 // not analyze again. Uniforms.count(VF) will return 1. 4577 Uniforms[VF].clear(); 4578 4579 // We now know that the loop is vectorizable! 4580 // Collect instructions inside the loop that will remain uniform after 4581 // vectorization. 4582 4583 // Global values, params and instructions outside of current loop are out of 4584 // scope. 4585 auto isOutOfScope = [&](Value *V) -> bool { 4586 Instruction *I = dyn_cast<Instruction>(V); 4587 return (!I || !TheLoop->contains(I)); 4588 }; 4589 4590 // Worklist containing uniform instructions demanding lane 0. 4591 SetVector<Instruction *> Worklist; 4592 BasicBlock *Latch = TheLoop->getLoopLatch(); 4593 4594 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4595 // that are scalar with predication must not be considered uniform after 4596 // vectorization, because that would create an erroneous replicating region 4597 // where only a single instance out of VF should be formed. 4598 // TODO: optimize such seldom cases if found important, see PR40816. 4599 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4600 if (isOutOfScope(I)) { 4601 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4602 << *I << "\n"); 4603 return; 4604 } 4605 if (isScalarWithPredication(I, VF)) { 4606 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4607 << *I << "\n"); 4608 return; 4609 } 4610 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4611 Worklist.insert(I); 4612 }; 4613 4614 // Start with the conditional branch. If the branch condition is an 4615 // instruction contained in the loop that is only used by the branch, it is 4616 // uniform. 4617 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4618 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4619 addToWorklistIfAllowed(Cmp); 4620 4621 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4622 InstWidening WideningDecision = getWideningDecision(I, VF); 4623 assert(WideningDecision != CM_Unknown && 4624 "Widening decision should be ready at this moment"); 4625 4626 // A uniform memory op is itself uniform. We exclude uniform stores 4627 // here as they demand the last lane, not the first one. 4628 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4629 assert(WideningDecision == CM_Scalarize); 4630 return true; 4631 } 4632 4633 return (WideningDecision == CM_Widen || 4634 WideningDecision == CM_Widen_Reverse || 4635 WideningDecision == CM_Interleave); 4636 }; 4637 4638 4639 // Returns true if Ptr is the pointer operand of a memory access instruction 4640 // I, and I is known to not require scalarization. 4641 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4642 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4643 }; 4644 4645 // Holds a list of values which are known to have at least one uniform use. 4646 // Note that there may be other uses which aren't uniform. A "uniform use" 4647 // here is something which only demands lane 0 of the unrolled iterations; 4648 // it does not imply that all lanes produce the same value (e.g. this is not 4649 // the usual meaning of uniform) 4650 SetVector<Value *> HasUniformUse; 4651 4652 // Scan the loop for instructions which are either a) known to have only 4653 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4654 for (auto *BB : TheLoop->blocks()) 4655 for (auto &I : *BB) { 4656 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4657 switch (II->getIntrinsicID()) { 4658 case Intrinsic::sideeffect: 4659 case Intrinsic::experimental_noalias_scope_decl: 4660 case Intrinsic::assume: 4661 case Intrinsic::lifetime_start: 4662 case Intrinsic::lifetime_end: 4663 if (TheLoop->hasLoopInvariantOperands(&I)) 4664 addToWorklistIfAllowed(&I); 4665 break; 4666 default: 4667 break; 4668 } 4669 } 4670 4671 // ExtractValue instructions must be uniform, because the operands are 4672 // known to be loop-invariant. 
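      // For illustration: "%lo = extractvalue { i64, i1 } %ov, 0" is uniform
      // here because %ov is loop-invariant (defined outside the loop).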
4673 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4674 assert(isOutOfScope(EVI->getAggregateOperand()) && 4675 "Expected aggregate value to be loop invariant"); 4676 addToWorklistIfAllowed(EVI); 4677 continue; 4678 } 4679 4680 // If there's no pointer operand, there's nothing to do. 4681 auto *Ptr = getLoadStorePointerOperand(&I); 4682 if (!Ptr) 4683 continue; 4684 4685 // A uniform memory op is itself uniform. We exclude uniform stores 4686 // here as they demand the last lane, not the first one. 4687 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4688 addToWorklistIfAllowed(&I); 4689 4690 if (isUniformDecision(&I, VF)) { 4691 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4692 HasUniformUse.insert(Ptr); 4693 } 4694 } 4695 4696 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4697 // demanding) users. Since loops are assumed to be in LCSSA form, this 4698 // disallows uses outside the loop as well. 4699 for (auto *V : HasUniformUse) { 4700 if (isOutOfScope(V)) 4701 continue; 4702 auto *I = cast<Instruction>(V); 4703 auto UsersAreMemAccesses = 4704 llvm::all_of(I->users(), [&](User *U) -> bool { 4705 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4706 }); 4707 if (UsersAreMemAccesses) 4708 addToWorklistIfAllowed(I); 4709 } 4710 4711 // Expand Worklist in topological order: whenever a new instruction 4712 // is added , its users should be already inside Worklist. It ensures 4713 // a uniform instruction will only be used by uniform instructions. 4714 unsigned idx = 0; 4715 while (idx != Worklist.size()) { 4716 Instruction *I = Worklist[idx++]; 4717 4718 for (auto OV : I->operand_values()) { 4719 // isOutOfScope operands cannot be uniform instructions. 4720 if (isOutOfScope(OV)) 4721 continue; 4722 // First order recurrence Phi's should typically be considered 4723 // non-uniform. 4724 auto *OP = dyn_cast<PHINode>(OV); 4725 if (OP && Legal->isFirstOrderRecurrence(OP)) 4726 continue; 4727 // If all the users of the operand are uniform, then add the 4728 // operand into the uniform worklist. 4729 auto *OI = cast<Instruction>(OV); 4730 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4731 auto *J = cast<Instruction>(U); 4732 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4733 })) 4734 addToWorklistIfAllowed(OI); 4735 } 4736 } 4737 4738 // For an instruction to be added into Worklist above, all its users inside 4739 // the loop should also be in Worklist. However, this condition cannot be 4740 // true for phi nodes that form a cyclic dependence. We must process phi 4741 // nodes separately. An induction variable will remain uniform if all users 4742 // of the induction variable and induction variable update remain uniform. 4743 // The code below handles both pointer and non-pointer induction variables. 4744 for (auto &Induction : Legal->getInductionVars()) { 4745 auto *Ind = Induction.first; 4746 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4747 4748 // Determine if all users of the induction variable are uniform after 4749 // vectorization. 4750 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4751 auto *I = cast<Instruction>(U); 4752 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4753 isVectorizedMemAccessUse(I, Ind); 4754 }); 4755 if (!UniformInd) 4756 continue; 4757 4758 // Determine if all users of the induction variable update instruction are 4759 // uniform after vectorization. 
4760 auto UniformIndUpdate = 4761 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4762 auto *I = cast<Instruction>(U); 4763 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4764 isVectorizedMemAccessUse(I, IndUpdate); 4765 }); 4766 if (!UniformIndUpdate) 4767 continue; 4768 4769 // The induction variable and its update instruction will remain uniform. 4770 addToWorklistIfAllowed(Ind); 4771 addToWorklistIfAllowed(IndUpdate); 4772 } 4773 4774 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4775 } 4776 4777 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4778 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4779 4780 if (Legal->getRuntimePointerChecking()->Need) { 4781 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4782 "runtime pointer checks needed. Enable vectorization of this " 4783 "loop with '#pragma clang loop vectorize(enable)' when " 4784 "compiling with -Os/-Oz", 4785 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4786 return true; 4787 } 4788 4789 if (!PSE.getPredicate().isAlwaysTrue()) { 4790 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4791 "runtime SCEV checks needed. Enable vectorization of this " 4792 "loop with '#pragma clang loop vectorize(enable)' when " 4793 "compiling with -Os/-Oz", 4794 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4795 return true; 4796 } 4797 4798 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4799 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4800 reportVectorizationFailure("Runtime stride check for small trip count", 4801 "runtime stride == 1 checks needed. Enable vectorization of " 4802 "this loop without such check by compiling with -Os/-Oz", 4803 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4804 return true; 4805 } 4806 4807 return false; 4808 } 4809 4810 ElementCount 4811 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4812 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4813 return ElementCount::getScalable(0); 4814 4815 if (Hints->isScalableVectorizationDisabled()) { 4816 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4817 "ScalableVectorizationDisabled", ORE, TheLoop); 4818 return ElementCount::getScalable(0); 4819 } 4820 4821 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4822 4823 auto MaxScalableVF = ElementCount::getScalable( 4824 std::numeric_limits<ElementCount::ScalarTy>::max()); 4825 4826 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4827 // FIXME: While for scalable vectors this is currently sufficient, this should 4828 // be replaced by a more detailed mechanism that filters out specific VFs, 4829 // instead of invalidating vectorization for a whole set of VFs based on the 4830 // MaxVF. 4831 4832 // Disable scalable vectorization if the loop contains unsupported reductions. 4833 if (!canVectorizeReductions(MaxScalableVF)) { 4834 reportVectorizationInfo( 4835 "Scalable vectorization not supported for the reduction " 4836 "operations found in this loop.", 4837 "ScalableVFUnfeasible", ORE, TheLoop); 4838 return ElementCount::getScalable(0); 4839 } 4840 4841 // Disable scalable vectorization if the loop contains any instructions 4842 // with element types not supported for scalable vectors. 
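  // For illustration (target-dependent): a loop computing on fp128 or i128
  // values would typically be rejected here, since most scalable-vector
  // targets cannot legalize those element types.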
4843 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4844 return !Ty->isVoidTy() && 4845 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4846 })) { 4847 reportVectorizationInfo("Scalable vectorization is not supported " 4848 "for all element types found in this loop.", 4849 "ScalableVFUnfeasible", ORE, TheLoop); 4850 return ElementCount::getScalable(0); 4851 } 4852 4853 if (Legal->isSafeForAnyVectorWidth()) 4854 return MaxScalableVF; 4855 4856 // Limit MaxScalableVF by the maximum safe dependence distance. 4857 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4858 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4859 MaxVScale = 4860 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4861 MaxScalableVF = ElementCount::getScalable( 4862 MaxVScale ? (MaxSafeElements / MaxVScale.value()) : 0); 4863 if (!MaxScalableVF) 4864 reportVectorizationInfo( 4865 "Max legal vector width too small, scalable vectorization " 4866 "unfeasible.", 4867 "ScalableVFUnfeasible", ORE, TheLoop); 4868 4869 return MaxScalableVF; 4870 } 4871 4872 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4873 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4874 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4875 unsigned SmallestType, WidestType; 4876 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4877 4878 // Get the maximum safe dependence distance in bits computed by LAA. 4879 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4880 // the memory accesses that is most restrictive (involved in the smallest 4881 // dependence distance). 4882 unsigned MaxSafeElements = 4883 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4884 4885 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4886 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4887 4888 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4889 << ".\n"); 4890 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4891 << ".\n"); 4892 4893 // First analyze the UserVF, fall back if the UserVF should be ignored. 4894 if (UserVF) { 4895 auto MaxSafeUserVF = 4896 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4897 4898 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4899 // If `VF=vscale x N` is safe, then so is `VF=N` 4900 if (UserVF.isScalable()) 4901 return FixedScalableVFPair( 4902 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4903 else 4904 return UserVF; 4905 } 4906 4907 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4908 4909 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4910 // is better to ignore the hint and let the compiler choose a suitable VF. 
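    // For illustration: with "#pragma clang loop vectorize_width(16)" but a
    // dependence-limited MaxSafeFixedVF of 8, the fixed UserVF is clamped to
    // 8 and a remark is emitted; a scalable UserVF in the same situation is
    // ignored instead.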
4911 if (!UserVF.isScalable()) { 4912 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4913 << " is unsafe, clamping to max safe VF=" 4914 << MaxSafeFixedVF << ".\n"); 4915 ORE->emit([&]() { 4916 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4917 TheLoop->getStartLoc(), 4918 TheLoop->getHeader()) 4919 << "User-specified vectorization factor " 4920 << ore::NV("UserVectorizationFactor", UserVF) 4921 << " is unsafe, clamping to maximum safe vectorization factor " 4922 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 4923 }); 4924 return MaxSafeFixedVF; 4925 } 4926 4927 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 4928 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4929 << " is ignored because scalable vectors are not " 4930 "available.\n"); 4931 ORE->emit([&]() { 4932 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4933 TheLoop->getStartLoc(), 4934 TheLoop->getHeader()) 4935 << "User-specified vectorization factor " 4936 << ore::NV("UserVectorizationFactor", UserVF) 4937 << " is ignored because the target does not support scalable " 4938 "vectors. The compiler will pick a more suitable value."; 4939 }); 4940 } else { 4941 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4942 << " is unsafe. Ignoring scalable UserVF.\n"); 4943 ORE->emit([&]() { 4944 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4945 TheLoop->getStartLoc(), 4946 TheLoop->getHeader()) 4947 << "User-specified vectorization factor " 4948 << ore::NV("UserVectorizationFactor", UserVF) 4949 << " is unsafe. Ignoring the hint to let the compiler pick a " 4950 "more suitable value."; 4951 }); 4952 } 4953 } 4954 4955 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4956 << " / " << WidestType << " bits.\n"); 4957 4958 FixedScalableVFPair Result(ElementCount::getFixed(1), 4959 ElementCount::getScalable(0)); 4960 if (auto MaxVF = 4961 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4962 MaxSafeFixedVF, FoldTailByMasking)) 4963 Result.FixedVF = MaxVF; 4964 4965 if (auto MaxVF = 4966 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4967 MaxSafeScalableVF, FoldTailByMasking)) 4968 if (MaxVF.isScalable()) { 4969 Result.ScalableVF = MaxVF; 4970 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 4971 << "\n"); 4972 } 4973 4974 return Result; 4975 } 4976 4977 FixedScalableVFPair 4978 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 4979 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4980 // TODO: It may by useful to do since it's still likely to be dynamically 4981 // uniform if the target can skip. 4982 reportVectorizationFailure( 4983 "Not inserting runtime ptr check for divergent target", 4984 "runtime pointer checks needed. 
Not enabled for divergent target", 4985 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 4986 return FixedScalableVFPair::getNone(); 4987 } 4988 4989 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4990 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4991 if (TC == 1) { 4992 reportVectorizationFailure("Single iteration (non) loop", 4993 "loop trip count is one, irrelevant for vectorization", 4994 "SingleIterationLoop", ORE, TheLoop); 4995 return FixedScalableVFPair::getNone(); 4996 } 4997 4998 switch (ScalarEpilogueStatus) { 4999 case CM_ScalarEpilogueAllowed: 5000 return computeFeasibleMaxVF(TC, UserVF, false); 5001 case CM_ScalarEpilogueNotAllowedUsePredicate: 5002 LLVM_FALLTHROUGH; 5003 case CM_ScalarEpilogueNotNeededUsePredicate: 5004 LLVM_DEBUG( 5005 dbgs() << "LV: vector predicate hint/switch found.\n" 5006 << "LV: Not allowing scalar epilogue, creating predicated " 5007 << "vector loop.\n"); 5008 break; 5009 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5010 // fallthrough as a special case of OptForSize 5011 case CM_ScalarEpilogueNotAllowedOptSize: 5012 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5013 LLVM_DEBUG( 5014 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5015 else 5016 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5017 << "count.\n"); 5018 5019 // Bail if runtime checks are required, which are not good when optimising 5020 // for size. 5021 if (runtimeChecksRequired()) 5022 return FixedScalableVFPair::getNone(); 5023 5024 break; 5025 } 5026 5027 // The only loops we can vectorize without a scalar epilogue, are loops with 5028 // a bottom-test and a single exiting block. We'd have to handle the fact 5029 // that not every instruction executes on the last iteration. This will 5030 // require a lane mask which varies through the vector loop body. (TODO) 5031 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5032 // If there was a tail-folding hint/switch, but we can't fold the tail by 5033 // masking, fallback to a vectorization with a scalar epilogue. 5034 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5035 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5036 "scalar epilogue instead.\n"); 5037 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5038 return computeFeasibleMaxVF(TC, UserVF, false); 5039 } 5040 return FixedScalableVFPair::getNone(); 5041 } 5042 5043 // Now try the tail folding 5044 5045 // Invalidate interleave groups that require an epilogue if we can't mask 5046 // the interleave-group. 5047 if (!useMaskedInterleavedAccesses(TTI)) { 5048 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5049 "No decisions should have been taken at this point"); 5050 // Note: There is no need to invalidate any cost modeling decisions here, as 5051 // non where taken so far. 5052 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5053 } 5054 5055 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true); 5056 // Avoid tail folding if the trip count is known to be a multiple of any VF 5057 // we chose. 5058 // FIXME: The condition below pessimises the case for fixed-width vectors, 5059 // when scalable VFs are also candidates for vectorization. 
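  // Worked example (illustrative): with a known trip count of 64,
  // MaxFixedVF = 8 and UserIC = 2, MaxVFtimesIC is 16 and 64 % 16 == 0, so no
  // tail remains and the fixed factors are accepted without folding.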
5060 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5061 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5062 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5063 "MaxFixedVF must be a power of 2"); 5064 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5065 : MaxFixedVF.getFixedValue(); 5066 ScalarEvolution *SE = PSE.getSE(); 5067 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5068 const SCEV *ExitCount = SE->getAddExpr( 5069 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5070 const SCEV *Rem = SE->getURemExpr( 5071 SE->applyLoopGuards(ExitCount, TheLoop), 5072 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5073 if (Rem->isZero()) { 5074 // Accept MaxFixedVF if we do not have a tail. 5075 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5076 return MaxFactors; 5077 } 5078 } 5079 5080 // If we don't know the precise trip count, or if the trip count that we 5081 // found modulo the vectorization factor is not zero, try to fold the tail 5082 // by masking. 5083 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5084 if (Legal->prepareToFoldTailByMasking()) { 5085 FoldTailByMasking = true; 5086 return MaxFactors; 5087 } 5088 5089 // If there was a tail-folding hint/switch, but we can't fold the tail by 5090 // masking, fallback to a vectorization with a scalar epilogue. 5091 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5092 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5093 "scalar epilogue instead.\n"); 5094 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5095 return MaxFactors; 5096 } 5097 5098 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5099 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5100 return FixedScalableVFPair::getNone(); 5101 } 5102 5103 if (TC == 0) { 5104 reportVectorizationFailure( 5105 "Unable to calculate the loop count due to complex control flow", 5106 "unable to calculate the loop count due to complex control flow", 5107 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5108 return FixedScalableVFPair::getNone(); 5109 } 5110 5111 reportVectorizationFailure( 5112 "Cannot optimize for size and vectorize at the same time.", 5113 "cannot optimize for size and vectorize at the same time. " 5114 "Enable vectorization of this loop with '#pragma clang loop " 5115 "vectorize(enable)' when compiling with -Os/-Oz", 5116 "NoTailLoopWithOptForSize", ORE, TheLoop); 5117 return FixedScalableVFPair::getNone(); 5118 } 5119 5120 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5121 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5122 ElementCount MaxSafeVF, bool FoldTailByMasking) { 5123 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5124 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5125 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5126 : TargetTransformInfo::RGK_FixedWidthVector); 5127 5128 // Convenience function to return the minimum of two ElementCounts. 5129 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5130 assert((LHS.isScalable() == RHS.isScalable()) && 5131 "Scalable flags must match"); 5132 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5133 }; 5134 5135 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5136 // Note that both WidestRegister and WidestType may not be a powers of 2. 
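  // Worked example (illustrative): with 128-bit vector registers and a widest
  // scalar type of 32 bits, PowerOf2Floor(128 / 32) yields 4 lanes, which is
  // then clamped to MaxSafeVF below.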
5137 auto MaxVectorElementCount = ElementCount::get( 5138 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5139 ComputeScalableMaxVF); 5140 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5141 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5142 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5143 5144 if (!MaxVectorElementCount) { 5145 LLVM_DEBUG(dbgs() << "LV: The target has no " 5146 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5147 << " vector registers.\n"); 5148 return ElementCount::getFixed(1); 5149 } 5150 5151 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5152 if (ConstTripCount && 5153 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5154 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5155 // If loop trip count (TC) is known at compile time there is no point in 5156 // choosing VF greater than TC (as done in the loop below). Select maximum 5157 // power of two which doesn't exceed TC. 5158 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5159 // when the TC is less than or equal to the known number of lanes. 5160 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5161 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5162 "exceeding the constant trip count: " 5163 << ClampedConstTripCount << "\n"); 5164 return ElementCount::getFixed(ClampedConstTripCount); 5165 } 5166 5167 TargetTransformInfo::RegisterKind RegKind = 5168 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5169 : TargetTransformInfo::RGK_FixedWidthVector; 5170 ElementCount MaxVF = MaxVectorElementCount; 5171 if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 && 5172 TTI.shouldMaximizeVectorBandwidth(RegKind))) { 5173 auto MaxVectorElementCountMaxBW = ElementCount::get( 5174 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5175 ComputeScalableMaxVF); 5176 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5177 5178 // Collect all viable vectorization factors larger than the default MaxVF 5179 // (i.e. MaxVectorElementCount). 5180 SmallVector<ElementCount, 8> VFs; 5181 for (ElementCount VS = MaxVectorElementCount * 2; 5182 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5183 VFs.push_back(VS); 5184 5185 // For each VF calculate its register usage. 5186 auto RUs = calculateRegisterUsage(VFs); 5187 5188 // Select the largest VF which doesn't require more registers than existing 5189 // ones. 5190 for (int i = RUs.size() - 1; i >= 0; --i) { 5191 bool Selected = true; 5192 for (auto &pair : RUs[i].MaxLocalUsers) { 5193 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5194 if (pair.second > TargetNumRegisters) 5195 Selected = false; 5196 } 5197 if (Selected) { 5198 MaxVF = VFs[i]; 5199 break; 5200 } 5201 } 5202 if (ElementCount MinVF = 5203 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5204 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5205 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5206 << ") with target's minimum: " << MinVF << '\n'); 5207 MaxVF = MinVF; 5208 } 5209 } 5210 5211 // Invalidate any widening decisions we might have made, in case the loop 5212 // requires prediction (decided later), but we have already made some 5213 // load/store widening decisions. 
5214 invalidateCostModelingDecisions(); 5215 } 5216 return MaxVF; 5217 } 5218 5219 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5220 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5221 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5222 auto Min = Attr.getVScaleRangeMin(); 5223 auto Max = Attr.getVScaleRangeMax(); 5224 if (Max && Min == Max) 5225 return Max; 5226 } 5227 5228 return TTI.getVScaleForTuning(); 5229 } 5230 5231 bool LoopVectorizationCostModel::isMoreProfitable( 5232 const VectorizationFactor &A, const VectorizationFactor &B) const { 5233 InstructionCost CostA = A.Cost; 5234 InstructionCost CostB = B.Cost; 5235 5236 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5237 5238 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5239 MaxTripCount) { 5240 // If we are folding the tail and the trip count is a known (possibly small) 5241 // constant, the trip count will be rounded up to an integer number of 5242 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5243 // which we compare directly. When not folding the tail, the total cost will 5244 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5245 // approximated with the per-lane cost below instead of using the tripcount 5246 // as here. 5247 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5248 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5249 return RTCostA < RTCostB; 5250 } 5251 5252 // Improve estimate for the vector width if it is scalable. 5253 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5254 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5255 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5256 if (A.Width.isScalable()) 5257 EstimatedWidthA *= VScale.value(); 5258 if (B.Width.isScalable()) 5259 EstimatedWidthB *= VScale.value(); 5260 } 5261 5262 // Assume vscale may be larger than 1 (or the value being tuned for), 5263 // so that scalable vectorization is slightly favorable over fixed-width 5264 // vectorization. 5265 if (A.Width.isScalable() && !B.Width.isScalable()) 5266 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5267 5268 // To avoid the need for FP division: 5269 // (CostA / A.Width) < (CostB / B.Width) 5270 // <=> (CostA * B.Width) < (CostB * A.Width) 5271 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5272 } 5273 5274 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5275 const ElementCountSet &VFCandidates) { 5276 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5277 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5278 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5279 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5280 "Expected Scalar VF to be a candidate"); 5281 5282 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost, 5283 ExpectedCost); 5284 VectorizationFactor ChosenFactor = ScalarCost; 5285 5286 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5287 if (ForceVectorization && VFCandidates.size() > 1) { 5288 // Ignore scalar width, because the user explicitly wants vectorization. 5289 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5290 // evaluation. 
5291 ChosenFactor.Cost = InstructionCost::getMax(); 5292 } 5293 5294 SmallVector<InstructionVFPair> InvalidCosts; 5295 for (const auto &i : VFCandidates) { 5296 // The cost for scalar VF=1 is already calculated, so ignore it. 5297 if (i.isScalar()) 5298 continue; 5299 5300 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5301 VectorizationFactor Candidate(i, C.first, ScalarCost.ScalarCost); 5302 5303 #ifndef NDEBUG 5304 unsigned AssumedMinimumVscale = 1; 5305 if (Optional<unsigned> VScale = getVScaleForTuning()) 5306 AssumedMinimumVscale = *VScale; 5307 unsigned Width = 5308 Candidate.Width.isScalable() 5309 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5310 : Candidate.Width.getFixedValue(); 5311 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5312 << " costs: " << (Candidate.Cost / Width)); 5313 if (i.isScalable()) 5314 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5315 << AssumedMinimumVscale << ")"); 5316 LLVM_DEBUG(dbgs() << ".\n"); 5317 #endif 5318 5319 if (!C.second && !ForceVectorization) { 5320 LLVM_DEBUG( 5321 dbgs() << "LV: Not considering vector loop of width " << i 5322 << " because it will not generate any vector instructions.\n"); 5323 continue; 5324 } 5325 5326 // If profitable add it to ProfitableVF list. 5327 if (isMoreProfitable(Candidate, ScalarCost)) 5328 ProfitableVFs.push_back(Candidate); 5329 5330 if (isMoreProfitable(Candidate, ChosenFactor)) 5331 ChosenFactor = Candidate; 5332 } 5333 5334 // Emit a report of VFs with invalid costs in the loop. 5335 if (!InvalidCosts.empty()) { 5336 // Group the remarks per instruction, keeping the instruction order from 5337 // InvalidCosts. 5338 std::map<Instruction *, unsigned> Numbering; 5339 unsigned I = 0; 5340 for (auto &Pair : InvalidCosts) 5341 if (!Numbering.count(Pair.first)) 5342 Numbering[Pair.first] = I++; 5343 5344 // Sort the list, first on instruction(number) then on VF. 5345 llvm::sort(InvalidCosts, 5346 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5347 if (Numbering[A.first] != Numbering[B.first]) 5348 return Numbering[A.first] < Numbering[B.first]; 5349 ElementCountComparator ECC; 5350 return ECC(A.second, B.second); 5351 }); 5352 5353 // For a list of ordered instruction-vf pairs: 5354 // [(load, vf1), (load, vf2), (store, vf1)] 5355 // Group the instructions together to emit separate remarks for: 5356 // load (vf1, vf2) 5357 // store (vf1) 5358 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5359 auto Subset = ArrayRef<InstructionVFPair>(); 5360 do { 5361 if (Subset.empty()) 5362 Subset = Tail.take_front(1); 5363 5364 Instruction *I = Subset.front().first; 5365 5366 // If the next instruction is different, or if there are no other pairs, 5367 // emit a remark for the collated subset. e.g. 5368 // [(load, vf1), (load, vf2))] 5369 // to emit: 5370 // remark: invalid costs for 'load' at VF=(vf, vf2) 5371 if (Subset == Tail || Tail[Subset.size()].first != I) { 5372 std::string OutString; 5373 raw_string_ostream OS(OutString); 5374 assert(!Subset.empty() && "Unexpected empty range"); 5375 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5376 for (auto &Pair : Subset) 5377 OS << (Pair.second == Subset.front().second ? 
"" : ", ") 5378 << Pair.second; 5379 OS << "):"; 5380 if (auto *CI = dyn_cast<CallInst>(I)) 5381 OS << " call to " << CI->getCalledFunction()->getName(); 5382 else 5383 OS << " " << I->getOpcodeName(); 5384 OS.flush(); 5385 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5386 Tail = Tail.drop_front(Subset.size()); 5387 Subset = {}; 5388 } else 5389 // Grow the subset by one element 5390 Subset = Tail.take_front(Subset.size() + 1); 5391 } while (!Tail.empty()); 5392 } 5393 5394 if (!EnableCondStoresVectorization && NumPredStores) { 5395 reportVectorizationFailure("There are conditional stores.", 5396 "store that is conditionally executed prevents vectorization", 5397 "ConditionalStore", ORE, TheLoop); 5398 ChosenFactor = ScalarCost; 5399 } 5400 5401 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5402 !isMoreProfitable(ChosenFactor, ScalarCost)) dbgs() 5403 << "LV: Vectorization seems to be not beneficial, " 5404 << "but was forced by a user.\n"); 5405 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5406 return ChosenFactor; 5407 } 5408 5409 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5410 const Loop &L, ElementCount VF) const { 5411 // Cross iteration phis such as reductions need special handling and are 5412 // currently unsupported. 5413 if (any_of(L.getHeader()->phis(), 5414 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5415 return false; 5416 5417 // Phis with uses outside of the loop require special handling and are 5418 // currently unsupported. 5419 for (auto &Entry : Legal->getInductionVars()) { 5420 // Look for uses of the value of the induction at the last iteration. 5421 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5422 for (User *U : PostInc->users()) 5423 if (!L.contains(cast<Instruction>(U))) 5424 return false; 5425 // Look for uses of penultimate value of the induction. 5426 for (User *U : Entry.first->users()) 5427 if (!L.contains(cast<Instruction>(U))) 5428 return false; 5429 } 5430 5431 // Induction variables that are widened require special handling that is 5432 // currently not supported. 5433 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5434 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5435 this->isProfitableToScalarize(Entry.first, VF)); 5436 })) 5437 return false; 5438 5439 // Epilogue vectorization code has not been auditted to ensure it handles 5440 // non-latch exits properly. It may be fine, but it needs auditted and 5441 // tested. 5442 if (L.getExitingBlock() != L.getLoopLatch()) 5443 return false; 5444 5445 return true; 5446 } 5447 5448 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5449 const ElementCount VF) const { 5450 // FIXME: We need a much better cost-model to take different parameters such 5451 // as register pressure, code size increase and cost of extra branches into 5452 // account. For now we apply a very crude heuristic and only consider loops 5453 // with vectorization factors larger than a certain value. 5454 // We also consider epilogue vectorization unprofitable for targets that don't 5455 // consider interleaving beneficial (eg. MVE). 5456 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5457 return false; 5458 // FIXME: We should consider changing the threshold for scalable 5459 // vectors to take VScaleForTuning into account. 
5460 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF) 5461 return true; 5462 return false; 5463 } 5464 5465 VectorizationFactor 5466 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5467 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5468 VectorizationFactor Result = VectorizationFactor::Disabled(); 5469 if (!EnableEpilogueVectorization) { 5470 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5471 return Result; 5472 } 5473 5474 if (!isScalarEpilogueAllowed()) { 5475 LLVM_DEBUG( 5476 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5477 "allowed.\n";); 5478 return Result; 5479 } 5480 5481 // Not really a cost consideration, but check for unsupported cases here to 5482 // simplify the logic. 5483 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5484 LLVM_DEBUG( 5485 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5486 "not a supported candidate.\n";); 5487 return Result; 5488 } 5489 5490 if (EpilogueVectorizationForceVF > 1) { 5491 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5492 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5493 if (LVP.hasPlanWithVF(ForcedEC)) 5494 return {ForcedEC, 0, 0}; 5495 else { 5496 LLVM_DEBUG( 5497 dbgs() 5498 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5499 return Result; 5500 } 5501 } 5502 5503 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5504 TheLoop->getHeader()->getParent()->hasMinSize()) { 5505 LLVM_DEBUG( 5506 dbgs() 5507 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5508 return Result; 5509 } 5510 5511 if (!isEpilogueVectorizationProfitable(MainLoopVF)) { 5512 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 5513 "this loop\n"); 5514 return Result; 5515 } 5516 5517 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know 5518 // the main loop handles 8 lanes per iteration. We could still benefit from 5519 // vectorizing the epilogue loop with VF=4. 5520 ElementCount EstimatedRuntimeVF = MainLoopVF; 5521 if (MainLoopVF.isScalable()) { 5522 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5523 if (Optional<unsigned> VScale = getVScaleForTuning()) 5524 EstimatedRuntimeVF *= *VScale; 5525 } 5526 5527 for (auto &NextVF : ProfitableVFs) 5528 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5529 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5530 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5531 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5532 LVP.hasPlanWithVF(NextVF.Width)) 5533 Result = NextVF; 5534 5535 if (Result != VectorizationFactor::Disabled()) 5536 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5537 << Result.Width << "\n";); 5538 return Result; 5539 } 5540 5541 std::pair<unsigned, unsigned> 5542 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5543 unsigned MinWidth = -1U; 5544 unsigned MaxWidth = 8; 5545 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5546 // For in-loop reductions, no element types are added to ElementTypesInLoop 5547 // if there are no loads/stores in the loop. In this case, check through the 5548 // reduction variables to determine the maximum width. 5549 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5550 // Reset MaxWidth so that we can find the smallest type used by recurrences 5551 // in the loop. 
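    // Sketch of the computation below: for an i32 add reduction whose inputs
    // are sign-extended from i16, min(MinWidthCastToRecurrenceType, i32 width)
    // = min(16, 32) = 16, so the loop reports a widest type of 16 bits rather
    // than 32 (hypothetical example, assuming a single reduction in the loop).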
5552 MaxWidth = -1U; 5553 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5554 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5555 // When finding the min width used by the recurrence we need to account 5556 // for casts on the input operands of the recurrence. 5557 MaxWidth = std::min<unsigned>( 5558 MaxWidth, std::min<unsigned>( 5559 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5560 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5561 } 5562 } else { 5563 for (Type *T : ElementTypesInLoop) { 5564 MinWidth = std::min<unsigned>( 5565 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5566 MaxWidth = std::max<unsigned>( 5567 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5568 } 5569 } 5570 return {MinWidth, MaxWidth}; 5571 } 5572 5573 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5574 ElementTypesInLoop.clear(); 5575 // For each block. 5576 for (BasicBlock *BB : TheLoop->blocks()) { 5577 // For each instruction in the loop. 5578 for (Instruction &I : BB->instructionsWithoutDebug()) { 5579 Type *T = I.getType(); 5580 5581 // Skip ignored values. 5582 if (ValuesToIgnore.count(&I)) 5583 continue; 5584 5585 // Only examine Loads, Stores and PHINodes. 5586 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5587 continue; 5588 5589 // Examine PHI nodes that are reduction variables. Update the type to 5590 // account for the recurrence type. 5591 if (auto *PN = dyn_cast<PHINode>(&I)) { 5592 if (!Legal->isReductionVariable(PN)) 5593 continue; 5594 const RecurrenceDescriptor &RdxDesc = 5595 Legal->getReductionVars().find(PN)->second; 5596 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5597 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5598 RdxDesc.getRecurrenceType(), 5599 TargetTransformInfo::ReductionFlags())) 5600 continue; 5601 T = RdxDesc.getRecurrenceType(); 5602 } 5603 5604 // Examine the stored values. 5605 if (auto *ST = dyn_cast<StoreInst>(&I)) 5606 T = ST->getValueOperand()->getType(); 5607 5608 assert(T->isSized() && 5609 "Expected the load/store/recurrence type to be sized"); 5610 5611 ElementTypesInLoop.insert(T); 5612 } 5613 } 5614 } 5615 5616 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5617 unsigned LoopCost) { 5618 // -- The interleave heuristics -- 5619 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5620 // There are many micro-architectural considerations that we can't predict 5621 // at this level. For example, frontend pressure (on decode or fetch) due to 5622 // code size, or the number and capabilities of the execution ports. 5623 // 5624 // We use the following heuristics to select the interleave count: 5625 // 1. If the code has reductions, then we interleave to break the cross 5626 // iteration dependency. 5627 // 2. If the loop is really small, then we interleave to reduce the loop 5628 // overhead. 5629 // 3. We don't interleave if we think that we will spill registers to memory 5630 // due to the increased register pressure. 5631 5632 if (!isScalarEpilogueAllowed()) 5633 return 1; 5634 5635 // We used the distance for the interleave count. 5636 if (Legal->getMaxSafeDepDistBytes() != -1U) 5637 return 1; 5638 5639 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5640 const bool HasReductions = !Legal->getReductionVars().empty(); 5641 // Do not interleave loops with a relatively small known or estimated trip 5642 // count. 
But we will interleave when InterleaveSmallLoopScalarReduction is 5643 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5644 // because with the above conditions interleaving can expose ILP and break 5645 // cross iteration dependences for reductions. 5646 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5647 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5648 return 1; 5649 5650 // If we did not calculate the cost for VF (because the user selected the VF) 5651 // then we calculate the cost of VF here. 5652 if (LoopCost == 0) { 5653 InstructionCost C = expectedCost(VF).first; 5654 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5655 LoopCost = *C.getValue(); 5656 5657 // Loop body is free and there is no need for interleaving. 5658 if (LoopCost == 0) 5659 return 1; 5660 } 5661 5662 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5663 // We divide by these constants so assume that we have at least one 5664 // instruction that uses at least one register. 5665 for (auto& pair : R.MaxLocalUsers) { 5666 pair.second = std::max(pair.second, 1U); 5667 } 5668 5669 // We calculate the interleave count using the following formula. 5670 // Subtract the number of loop invariants from the number of available 5671 // registers. These registers are used by all of the interleaved instances. 5672 // Next, divide the remaining registers by the number of registers that is 5673 // required by the loop, in order to estimate how many parallel instances 5674 // fit without causing spills. All of this is rounded down if necessary to be 5675 // a power of two. We want power of two interleave count to simplify any 5676 // addressing operations or alignment considerations. 5677 // We also want power of two interleave counts to ensure that the induction 5678 // variable of the vector loop wraps to zero, when tail is folded by masking; 5679 // this currently happens when OptForSize, in which case IC is set to 1 above. 5680 unsigned IC = UINT_MAX; 5681 5682 for (auto& pair : R.MaxLocalUsers) { 5683 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5684 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5685 << " registers of " 5686 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5687 if (VF.isScalar()) { 5688 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5689 TargetNumRegisters = ForceTargetNumScalarRegs; 5690 } else { 5691 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5692 TargetNumRegisters = ForceTargetNumVectorRegs; 5693 } 5694 unsigned MaxLocalUsers = pair.second; 5695 unsigned LoopInvariantRegs = 0; 5696 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5697 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5698 5699 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5700 // Don't count the induction variable as interleaved. 5701 if (EnableIndVarRegisterHeur) { 5702 TmpIC = 5703 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5704 std::max(1U, (MaxLocalUsers - 1))); 5705 } 5706 5707 IC = std::min(IC, TmpIC); 5708 } 5709 5710 // Clamp the interleave ranges to reasonable counts. 5711 unsigned MaxInterleaveCount = 5712 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5713 5714 // Check if the user has overridden the max. 
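  // Before the user overrides and trip-count clamping below are applied, IC
  // reflects only register pressure. A worked example with purely hypothetical
  // numbers, not taken from any particular target:
  //   TargetNumRegisters = 32, LoopInvariantRegs = 2, MaxLocalUsers = 5
  //   With EnableIndVarRegisterHeur:
  //     TmpIC = PowerOf2Floor((32 - 2 - 1) / std::max(1U, 5 - 1))
  //           = PowerOf2Floor(29 / 4) = PowerOf2Floor(7) = 4
  //   i.e. at most 4 interleaved copies are assumed to fit without spilling.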
5715 if (VF.isScalar()) { 5716 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5717 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5718 } else { 5719 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5720 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5721 } 5722 5723 // If trip count is known or estimated compile time constant, limit the 5724 // interleave count to be less than the trip count divided by VF, provided it 5725 // is at least 1. 5726 // 5727 // For scalable vectors we can't know if interleaving is beneficial. It may 5728 // not be beneficial for small loops if none of the lanes in the second vector 5729 // iterations is enabled. However, for larger loops, there is likely to be a 5730 // similar benefit as for fixed-width vectors. For now, we choose to leave 5731 // the InterleaveCount as if vscale is '1', although if some information about 5732 // the vector is known (e.g. min vector size), we can make a better decision. 5733 if (BestKnownTC) { 5734 MaxInterleaveCount = 5735 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5736 // Make sure MaxInterleaveCount is greater than 0. 5737 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5738 } 5739 5740 assert(MaxInterleaveCount > 0 && 5741 "Maximum interleave count must be greater than 0"); 5742 5743 // Clamp the calculated IC to be between the 1 and the max interleave count 5744 // that the target and trip count allows. 5745 if (IC > MaxInterleaveCount) 5746 IC = MaxInterleaveCount; 5747 else 5748 // Make sure IC is greater than 0. 5749 IC = std::max(1u, IC); 5750 5751 assert(IC > 0 && "Interleave count must be greater than 0."); 5752 5753 // Interleave if we vectorized this loop and there is a reduction that could 5754 // benefit from interleaving. 5755 if (VF.isVector() && HasReductions) { 5756 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5757 return IC; 5758 } 5759 5760 // For any scalar loop that either requires runtime checks or predication we 5761 // are better off leaving this to the unroller. Note that if we've already 5762 // vectorized the loop we will have done the runtime check and so interleaving 5763 // won't require further checks. 5764 bool ScalarInterleavingRequiresPredication = 5765 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5766 return Legal->blockNeedsPredication(BB); 5767 })); 5768 bool ScalarInterleavingRequiresRuntimePointerCheck = 5769 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5770 5771 // We want to interleave small loops in order to reduce the loop overhead and 5772 // potentially expose ILP opportunities. 5773 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5774 << "LV: IC is " << IC << '\n' 5775 << "LV: VF is " << VF << '\n'); 5776 const bool AggressivelyInterleaveReductions = 5777 TTI.enableAggressiveInterleaving(HasReductions); 5778 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5779 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5780 // We assume that the cost overhead is 1 and we use the cost model 5781 // to estimate the cost of the loop and interleave until the cost of the 5782 // loop overhead is about 5% of the cost of the loop. 5783 unsigned SmallIC = 5784 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5785 5786 // Interleave until store/load ports (estimated by max interleave count) are 5787 // saturated. 
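    // Sketch of the small-loop heuristic just above, assuming a SmallLoopCost
    // threshold of 20 (a command-line-tunable value) and LoopCost = 6:
    //   SmallIC = min(IC, PowerOf2Floor(20 / 6)) = min(IC, 2)
    // The store/load port saturation computed below is an independent cap.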
5788 unsigned NumStores = Legal->getNumStores(); 5789 unsigned NumLoads = Legal->getNumLoads(); 5790 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5791 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5792 5793 // There is little point in interleaving for reductions containing selects 5794 // and compares when VF=1 since it may just create more overhead than it's 5795 // worth for loops with small trip counts. This is because we still have to 5796 // do the final reduction after the loop. 5797 bool HasSelectCmpReductions = 5798 HasReductions && 5799 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5800 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5801 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5802 RdxDesc.getRecurrenceKind()); 5803 }); 5804 if (HasSelectCmpReductions) { 5805 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5806 return 1; 5807 } 5808 5809 // If we have a scalar reduction (vector reductions are already dealt with 5810 // by this point), we can increase the critical path length if the loop 5811 // we're interleaving is inside another loop. For tree-wise reductions 5812 // set the limit to 2, and for ordered reductions it's best to disable 5813 // interleaving entirely. 5814 if (HasReductions && TheLoop->getLoopDepth() > 1) { 5815 bool HasOrderedReductions = 5816 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5817 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5818 return RdxDesc.isOrdered(); 5819 }); 5820 if (HasOrderedReductions) { 5821 LLVM_DEBUG( 5822 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 5823 return 1; 5824 } 5825 5826 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5827 SmallIC = std::min(SmallIC, F); 5828 StoresIC = std::min(StoresIC, F); 5829 LoadsIC = std::min(LoadsIC, F); 5830 } 5831 5832 if (EnableLoadStoreRuntimeInterleave && 5833 std::max(StoresIC, LoadsIC) > SmallIC) { 5834 LLVM_DEBUG( 5835 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5836 return std::max(StoresIC, LoadsIC); 5837 } 5838 5839 // If there are scalar reductions and TTI has enabled aggressive 5840 // interleaving for reductions, we will interleave to expose ILP. 5841 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 5842 AggressivelyInterleaveReductions) { 5843 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5844 // Interleave no less than SmallIC but not as aggressive as the normal IC 5845 // to satisfy the rare situation when resources are too limited. 5846 return std::max(IC / 2, SmallIC); 5847 } else { 5848 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5849 return SmallIC; 5850 } 5851 } 5852 5853 // Interleave if this is a large loop (small loops are already dealt with by 5854 // this point) that could benefit from interleaving. 5855 if (AggressivelyInterleaveReductions) { 5856 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5857 return IC; 5858 } 5859 5860 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5861 return 1; 5862 } 5863 5864 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5865 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 5866 // This function calculates the register usage by measuring the highest number 5867 // of values that are alive at a single location. Obviously, this is a very 5868 // rough estimation. We scan the loop in a topological order in order and 5869 // assign a number to each instruction. 
We use RPO to ensure that defs are 5870 // met before their users. We assume that each instruction that has in-loop 5871 // users starts an interval. We record every time that an in-loop value is 5872 // used, so we have a list of the first and last occurrences of each 5873 // instruction. Next, we transpose this data structure into a multi map that 5874 // holds the list of intervals that *end* at a specific location. This multi 5875 // map allows us to perform a linear search. We scan the instructions linearly 5876 // and record each time that a new interval starts, by placing it in a set. 5877 // If we find this value in the multi-map then we remove it from the set. 5878 // The max register usage is the maximum size of the set. 5879 // We also search for instructions that are defined outside the loop, but are 5880 // used inside the loop. We need this number separately from the max-interval 5881 // usage number because when we unroll, loop-invariant values do not take 5882 // more register. 5883 LoopBlocksDFS DFS(TheLoop); 5884 DFS.perform(LI); 5885 5886 RegisterUsage RU; 5887 5888 // Each 'key' in the map opens a new interval. The values 5889 // of the map are the index of the 'last seen' usage of the 5890 // instruction that is the key. 5891 using IntervalMap = DenseMap<Instruction *, unsigned>; 5892 5893 // Maps instruction to its index. 5894 SmallVector<Instruction *, 64> IdxToInstr; 5895 // Marks the end of each interval. 5896 IntervalMap EndPoint; 5897 // Saves the list of instruction indices that are used in the loop. 5898 SmallPtrSet<Instruction *, 8> Ends; 5899 // Saves the list of values that are used in the loop but are 5900 // defined outside the loop, such as arguments and constants. 5901 SmallPtrSet<Value *, 8> LoopInvariants; 5902 5903 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5904 for (Instruction &I : BB->instructionsWithoutDebug()) { 5905 IdxToInstr.push_back(&I); 5906 5907 // Save the end location of each USE. 5908 for (Value *U : I.operands()) { 5909 auto *Instr = dyn_cast<Instruction>(U); 5910 5911 // Ignore non-instruction values such as arguments, constants, etc. 5912 if (!Instr) 5913 continue; 5914 5915 // If this instruction is outside the loop then record it and continue. 5916 if (!TheLoop->contains(Instr)) { 5917 LoopInvariants.insert(Instr); 5918 continue; 5919 } 5920 5921 // Overwrite previous end points. 5922 EndPoint[Instr] = IdxToInstr.size(); 5923 Ends.insert(Instr); 5924 } 5925 } 5926 } 5927 5928 // Saves the list of intervals that end with the index in 'key'. 5929 using InstrList = SmallVector<Instruction *, 2>; 5930 DenseMap<unsigned, InstrList> TransposeEnds; 5931 5932 // Transpose the EndPoints to a list of values that end at each index. 
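  // Small example of the bookkeeping (hypothetical indices): if EndPoint is
  //   { %a -> 4, %b -> 4, %c -> 7 }
  // then after the loop below TransposeEnds is
  //   { 4 -> [%a, %b], 7 -> [%c] }
  // so the linear scan can close both intervals ending at index 4 in one step.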
5933 for (auto &Interval : EndPoint) 5934 TransposeEnds[Interval.second].push_back(Interval.first); 5935 5936 SmallPtrSet<Instruction *, 8> OpenIntervals; 5937 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5938 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5939 5940 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5941 5942 const auto &TTICapture = TTI; 5943 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 5944 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5945 return 0; 5946 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF)); 5947 }; 5948 5949 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5950 Instruction *I = IdxToInstr[i]; 5951 5952 // Remove all of the instructions that end at this location. 5953 InstrList &List = TransposeEnds[i]; 5954 for (Instruction *ToRemove : List) 5955 OpenIntervals.erase(ToRemove); 5956 5957 // Ignore instructions that are never used within the loop. 5958 if (!Ends.count(I)) 5959 continue; 5960 5961 // Skip ignored values. 5962 if (ValuesToIgnore.count(I)) 5963 continue; 5964 5965 // For each VF find the maximum usage of registers. 5966 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5967 // Count the number of live intervals. 5968 SmallMapVector<unsigned, unsigned, 4> RegUsage; 5969 5970 if (VFs[j].isScalar()) { 5971 for (auto Inst : OpenIntervals) { 5972 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5973 if (RegUsage.find(ClassID) == RegUsage.end()) 5974 RegUsage[ClassID] = 1; 5975 else 5976 RegUsage[ClassID] += 1; 5977 } 5978 } else { 5979 collectUniformsAndScalars(VFs[j]); 5980 for (auto Inst : OpenIntervals) { 5981 // Skip ignored values for VF > 1. 5982 if (VecValuesToIgnore.count(Inst)) 5983 continue; 5984 if (isScalarAfterVectorization(Inst, VFs[j])) { 5985 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5986 if (RegUsage.find(ClassID) == RegUsage.end()) 5987 RegUsage[ClassID] = 1; 5988 else 5989 RegUsage[ClassID] += 1; 5990 } else { 5991 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 5992 if (RegUsage.find(ClassID) == RegUsage.end()) 5993 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 5994 else 5995 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 5996 } 5997 } 5998 } 5999 6000 for (auto& pair : RegUsage) { 6001 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6002 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6003 else 6004 MaxUsages[j][pair.first] = pair.second; 6005 } 6006 } 6007 6008 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6009 << OpenIntervals.size() << '\n'); 6010 6011 // Add the current instruction to the list of open intervals. 6012 OpenIntervals.insert(I); 6013 } 6014 6015 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6016 SmallMapVector<unsigned, unsigned, 4> Invariant; 6017 6018 for (auto Inst : LoopInvariants) { 6019 unsigned Usage = 6020 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6021 unsigned ClassID = 6022 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6023 if (Invariant.find(ClassID) == Invariant.end()) 6024 Invariant[ClassID] = Usage; 6025 else 6026 Invariant[ClassID] += Usage; 6027 } 6028 6029 LLVM_DEBUG({ 6030 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6031 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6032 << " item\n"; 6033 for (const auto &pair : MaxUsages[i]) { 6034 dbgs() << "LV(REG): RegisterClass: " 6035 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6036 << " registers\n"; 6037 } 6038 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6039 << " item\n"; 6040 for (const auto &pair : Invariant) { 6041 dbgs() << "LV(REG): RegisterClass: " 6042 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6043 << " registers\n"; 6044 } 6045 }); 6046 6047 RU.LoopInvariantRegs = Invariant; 6048 RU.MaxLocalUsers = MaxUsages[i]; 6049 RUs[i] = RU; 6050 } 6051 6052 return RUs; 6053 } 6054 6055 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6056 ElementCount VF) { 6057 // TODO: Cost model for emulated masked load/store is completely 6058 // broken. This hack guides the cost model to use an artificially 6059 // high enough value to practically disable vectorization with such 6060 // operations, except where previously deployed legality hack allowed 6061 // using very low cost values. This is to avoid regressions coming simply 6062 // from moving "masked load/store" check from legality to cost model. 6063 // Masked Load/Gather emulation was previously never allowed. 6064 // Limited number of Masked Store/Scatter emulation was allowed. 6065 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6066 return isa<LoadInst>(I) || 6067 (isa<StoreInst>(I) && 6068 NumPredStores > NumberOfStoresToPredicate); 6069 } 6070 6071 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6072 // If we aren't vectorizing the loop, or if we've already collected the 6073 // instructions to scalarize, there's nothing to do. Collection may already 6074 // have occurred if we have a user-selected VF and are now computing the 6075 // expected cost for interleaving. 6076 if (VF.isScalar() || VF.isZero() || 6077 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6078 return; 6079 6080 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6081 // not profitable to scalarize any instructions, the presence of VF in the 6082 // map will indicate that we've analyzed it already. 6083 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6084 6085 PredicatedBBsAfterVectorization[VF].clear(); 6086 6087 // Find all the instructions that are scalar with predication in the loop and 6088 // determine if it would be better to not if-convert the blocks they are in. 6089 // If so, we also record the instructions to scalarize. 6090 for (BasicBlock *BB : TheLoop->blocks()) { 6091 if (!blockNeedsPredicationForAnyReason(BB)) 6092 continue; 6093 for (Instruction &I : *BB) 6094 if (isScalarWithPredication(&I, VF)) { 6095 ScalarCostsTy ScalarCosts; 6096 // Do not apply discount if scalable, because that would lead to 6097 // invalid scalarization costs. 6098 // Do not apply discount logic if hacked cost is needed 6099 // for emulated masked memrefs. 
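        // Note: computePredInstDiscount (defined further below) accumulates
        // VectorCost - ScalarCost over the single-use chain feeding I, so a
        // non-negative result means keeping the chain scalar is no more
        // expensive than vectorizing it, and the chain is recorded for
        // scalarization.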
6100 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) && 6101 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6102 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6103 // Remember that BB will remain after vectorization. 6104 PredicatedBBsAfterVectorization[VF].insert(BB); 6105 } 6106 } 6107 } 6108 6109 int LoopVectorizationCostModel::computePredInstDiscount( 6110 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6111 assert(!isUniformAfterVectorization(PredInst, VF) && 6112 "Instruction marked uniform-after-vectorization will be predicated"); 6113 6114 // Initialize the discount to zero, meaning that the scalar version and the 6115 // vector version cost the same. 6116 InstructionCost Discount = 0; 6117 6118 // Holds instructions to analyze. The instructions we visit are mapped in 6119 // ScalarCosts. Those instructions are the ones that would be scalarized if 6120 // we find that the scalar version costs less. 6121 SmallVector<Instruction *, 8> Worklist; 6122 6123 // Returns true if the given instruction can be scalarized. 6124 auto canBeScalarized = [&](Instruction *I) -> bool { 6125 // We only attempt to scalarize instructions forming a single-use chain 6126 // from the original predicated block that would otherwise be vectorized. 6127 // Although not strictly necessary, we give up on instructions we know will 6128 // already be scalar to avoid traversing chains that are unlikely to be 6129 // beneficial. 6130 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6131 isScalarAfterVectorization(I, VF)) 6132 return false; 6133 6134 // If the instruction is scalar with predication, it will be analyzed 6135 // separately. We ignore it within the context of PredInst. 6136 if (isScalarWithPredication(I, VF)) 6137 return false; 6138 6139 // If any of the instruction's operands are uniform after vectorization, 6140 // the instruction cannot be scalarized. This prevents, for example, a 6141 // masked load from being scalarized. 6142 // 6143 // We assume we will only emit a value for lane zero of an instruction 6144 // marked uniform after vectorization, rather than VF identical values. 6145 // Thus, if we scalarize an instruction that uses a uniform, we would 6146 // create uses of values corresponding to the lanes we aren't emitting code 6147 // for. This behavior can be changed by allowing getScalarValue to clone 6148 // the lane zero values for uniforms rather than asserting. 6149 for (Use &U : I->operands()) 6150 if (auto *J = dyn_cast<Instruction>(U.get())) 6151 if (isUniformAfterVectorization(J, VF)) 6152 return false; 6153 6154 // Otherwise, we can scalarize the instruction. 6155 return true; 6156 }; 6157 6158 // Compute the expected cost discount from scalarizing the entire expression 6159 // feeding the predicated instruction. We currently only consider expressions 6160 // that are single-use instruction chains. 6161 Worklist.push_back(PredInst); 6162 while (!Worklist.empty()) { 6163 Instruction *I = Worklist.pop_back_val(); 6164 6165 // If we've already analyzed the instruction, there's nothing to do. 6166 if (ScalarCosts.find(I) != ScalarCosts.end()) 6167 continue; 6168 6169 // Compute the cost of the vector instruction. Note that this cost already 6170 // includes the scalarization overhead of the predicated instruction. 6171 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6172 6173 // Compute the cost of the scalarized instruction. 
This cost is the cost of 6174 // the instruction as if it wasn't if-converted and instead remained in the 6175 // predicated block. We will scale this cost by block probability after 6176 // computing the scalarization overhead. 6177 InstructionCost ScalarCost = 6178 VF.getFixedValue() * 6179 getInstructionCost(I, ElementCount::getFixed(1)).first; 6180 6181 // Compute the scalarization overhead of needed insertelement instructions 6182 // and phi nodes. 6183 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6184 ScalarCost += TTI.getScalarizationOverhead( 6185 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6186 APInt::getAllOnes(VF.getFixedValue()), true, false); 6187 ScalarCost += 6188 VF.getFixedValue() * 6189 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6190 } 6191 6192 // Compute the scalarization overhead of needed extractelement 6193 // instructions. For each of the instruction's operands, if the operand can 6194 // be scalarized, add it to the worklist; otherwise, account for the 6195 // overhead. 6196 for (Use &U : I->operands()) 6197 if (auto *J = dyn_cast<Instruction>(U.get())) { 6198 assert(VectorType::isValidElementType(J->getType()) && 6199 "Instruction has non-scalar type"); 6200 if (canBeScalarized(J)) 6201 Worklist.push_back(J); 6202 else if (needsExtract(J, VF)) { 6203 ScalarCost += TTI.getScalarizationOverhead( 6204 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6205 APInt::getAllOnes(VF.getFixedValue()), false, true); 6206 } 6207 } 6208 6209 // Scale the total scalar cost by block probability. 6210 ScalarCost /= getReciprocalPredBlockProb(); 6211 6212 // Compute the discount. A non-negative discount means the vector version 6213 // of the instruction costs more, and scalarizing would be beneficial. 6214 Discount += VectorCost - ScalarCost; 6215 ScalarCosts[I] = ScalarCost; 6216 } 6217 6218 return *Discount.getValue(); 6219 } 6220 6221 LoopVectorizationCostModel::VectorizationCostTy 6222 LoopVectorizationCostModel::expectedCost( 6223 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6224 VectorizationCostTy Cost; 6225 6226 // For each block. 6227 for (BasicBlock *BB : TheLoop->blocks()) { 6228 VectorizationCostTy BlockCost; 6229 6230 // For each instruction in the old loop. 6231 for (Instruction &I : BB->instructionsWithoutDebug()) { 6232 // Skip ignored values. 6233 if (ValuesToIgnore.count(&I) || 6234 (VF.isVector() && VecValuesToIgnore.count(&I))) 6235 continue; 6236 6237 VectorizationCostTy C = getInstructionCost(&I, VF); 6238 6239 // Check if we should override the cost. 6240 if (C.first.isValid() && 6241 ForceTargetInstructionCost.getNumOccurrences() > 0) 6242 C.first = InstructionCost(ForceTargetInstructionCost); 6243 6244 // Keep a list of instructions with invalid costs. 6245 if (Invalid && !C.first.isValid()) 6246 Invalid->emplace_back(&I, VF); 6247 6248 BlockCost.first += C.first; 6249 BlockCost.second |= C.second; 6250 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6251 << " for VF " << VF << " For instruction: " << I 6252 << '\n'); 6253 } 6254 6255 // If we are vectorizing a predicated block, it will have been 6256 // if-converted. This means that the block's instructions (aside from 6257 // stores and instructions that may divide by zero) will now be 6258 // unconditionally executed. For the scalar case, we may not always execute 6259 // the predicated block, if it is an if-else block. Thus, scale the block's 6260 // cost by the probability of executing it. 
blockNeedsPredication from 6261 // Legal is used so as to not include all blocks in tail folded loops. 6262 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6263 BlockCost.first /= getReciprocalPredBlockProb(); 6264 6265 Cost.first += BlockCost.first; 6266 Cost.second |= BlockCost.second; 6267 } 6268 6269 return Cost; 6270 } 6271 6272 /// Gets Address Access SCEV after verifying that the access pattern 6273 /// is loop invariant except the induction variable dependence. 6274 /// 6275 /// This SCEV can be sent to the Target in order to estimate the address 6276 /// calculation cost. 6277 static const SCEV *getAddressAccessSCEV( 6278 Value *Ptr, 6279 LoopVectorizationLegality *Legal, 6280 PredicatedScalarEvolution &PSE, 6281 const Loop *TheLoop) { 6282 6283 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6284 if (!Gep) 6285 return nullptr; 6286 6287 // We are looking for a gep with all loop invariant indices except for one 6288 // which should be an induction variable. 6289 auto SE = PSE.getSE(); 6290 unsigned NumOperands = Gep->getNumOperands(); 6291 for (unsigned i = 1; i < NumOperands; ++i) { 6292 Value *Opd = Gep->getOperand(i); 6293 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6294 !Legal->isInductionVariable(Opd)) 6295 return nullptr; 6296 } 6297 6298 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6299 return PSE.getSCEV(Ptr); 6300 } 6301 6302 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6303 return Legal->hasStride(I->getOperand(0)) || 6304 Legal->hasStride(I->getOperand(1)); 6305 } 6306 6307 InstructionCost 6308 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6309 ElementCount VF) { 6310 assert(VF.isVector() && 6311 "Scalarization cost of instruction implies vectorization."); 6312 if (VF.isScalable()) 6313 return InstructionCost::getInvalid(); 6314 6315 Type *ValTy = getLoadStoreType(I); 6316 auto SE = PSE.getSE(); 6317 6318 unsigned AS = getLoadStoreAddressSpace(I); 6319 Value *Ptr = getLoadStorePointerOperand(I); 6320 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6321 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6322 // that it is being called from this specific place. 6323 6324 // Figure out whether the access is strided and get the stride value 6325 // if it's known in compile time 6326 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6327 6328 // Get the cost of the scalar memory instruction and address computation. 6329 InstructionCost Cost = 6330 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6331 6332 // Don't pass *I here, since it is scalar but will actually be part of a 6333 // vectorized loop where the user of it is a vectorized instruction. 6334 const Align Alignment = getLoadStoreAlignment(I); 6335 Cost += VF.getKnownMinValue() * 6336 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6337 AS, TTI::TCK_RecipThroughput); 6338 6339 // Get the overhead of the extractelement and insertelement instructions 6340 // we might create due to scalarization. 6341 Cost += getScalarizationOverhead(I, VF); 6342 6343 // If we have a predicated load/store, it will need extra i1 extracts and 6344 // conditional branches, but may not be executed for each vector lane. Scale 6345 // the cost by the probability of executing the predicated block. 
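  // Rough illustration, assuming getReciprocalPredBlockProb() == 2 (i.e. a
  // predicated block is assumed to execute for roughly half the lanes): a
  // scalarization cost of 40 becomes 20 below, and the i1 extracts and
  // branches are then added on top of that scaled value.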
6346 if (isPredicatedInst(I, VF)) { 6347 Cost /= getReciprocalPredBlockProb(); 6348 6349 // Add the cost of an i1 extract and a branch 6350 auto *Vec_i1Ty = 6351 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6352 Cost += TTI.getScalarizationOverhead( 6353 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6354 /*Insert=*/false, /*Extract=*/true); 6355 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6356 6357 if (useEmulatedMaskMemRefHack(I, VF)) 6358 // Artificially setting to a high enough value to practically disable 6359 // vectorization with such operations. 6360 Cost = 3000000; 6361 } 6362 6363 return Cost; 6364 } 6365 6366 InstructionCost 6367 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6368 ElementCount VF) { 6369 Type *ValTy = getLoadStoreType(I); 6370 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6371 Value *Ptr = getLoadStorePointerOperand(I); 6372 unsigned AS = getLoadStoreAddressSpace(I); 6373 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6374 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6375 6376 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6377 "Stride should be 1 or -1 for consecutive memory access"); 6378 const Align Alignment = getLoadStoreAlignment(I); 6379 InstructionCost Cost = 0; 6380 if (Legal->isMaskRequired(I)) 6381 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6382 CostKind); 6383 else 6384 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6385 CostKind, I); 6386 6387 bool Reverse = ConsecutiveStride < 0; 6388 if (Reverse) 6389 Cost += 6390 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6391 return Cost; 6392 } 6393 6394 InstructionCost 6395 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6396 ElementCount VF) { 6397 assert(Legal->isUniformMemOp(*I)); 6398 6399 Type *ValTy = getLoadStoreType(I); 6400 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6401 const Align Alignment = getLoadStoreAlignment(I); 6402 unsigned AS = getLoadStoreAddressSpace(I); 6403 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6404 if (isa<LoadInst>(I)) { 6405 return TTI.getAddressComputationCost(ValTy) + 6406 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6407 CostKind) + 6408 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6409 } 6410 StoreInst *SI = cast<StoreInst>(I); 6411 6412 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6413 return TTI.getAddressComputationCost(ValTy) + 6414 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6415 CostKind) + 6416 (isLoopInvariantStoreValue 6417 ? 
0 6418 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6419 VF.getKnownMinValue() - 1)); 6420 } 6421 6422 InstructionCost 6423 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6424 ElementCount VF) { 6425 Type *ValTy = getLoadStoreType(I); 6426 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6427 const Align Alignment = getLoadStoreAlignment(I); 6428 const Value *Ptr = getLoadStorePointerOperand(I); 6429 6430 return TTI.getAddressComputationCost(VectorTy) + 6431 TTI.getGatherScatterOpCost( 6432 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6433 TargetTransformInfo::TCK_RecipThroughput, I); 6434 } 6435 6436 InstructionCost 6437 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6438 ElementCount VF) { 6439 // TODO: Once we have support for interleaving with scalable vectors 6440 // we can calculate the cost properly here. 6441 if (VF.isScalable()) 6442 return InstructionCost::getInvalid(); 6443 6444 Type *ValTy = getLoadStoreType(I); 6445 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6446 unsigned AS = getLoadStoreAddressSpace(I); 6447 6448 auto Group = getInterleavedAccessGroup(I); 6449 assert(Group && "Fail to get an interleaved access group."); 6450 6451 unsigned InterleaveFactor = Group->getFactor(); 6452 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6453 6454 // Holds the indices of existing members in the interleaved group. 6455 SmallVector<unsigned, 4> Indices; 6456 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6457 if (Group->getMember(IF)) 6458 Indices.push_back(IF); 6459 6460 // Calculate the cost of the whole interleaved group. 6461 bool UseMaskForGaps = 6462 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6463 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6464 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6465 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6466 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6467 6468 if (Group->isReverse()) { 6469 // TODO: Add support for reversed masked interleaved access. 6470 assert(!Legal->isMaskRequired(I) && 6471 "Reverse masked interleaved access not supported."); 6472 Cost += 6473 Group->getNumMembers() * 6474 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6475 } 6476 return Cost; 6477 } 6478 6479 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6480 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6481 using namespace llvm::PatternMatch; 6482 // Early exit for no inloop reductions 6483 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6484 return None; 6485 auto *VectorTy = cast<VectorType>(Ty); 6486 6487 // We are looking for a pattern of, and finding the minimal acceptable cost: 6488 // reduce(mul(ext(A), ext(B))) or 6489 // reduce(mul(A, B)) or 6490 // reduce(ext(A)) or 6491 // reduce(A). 6492 // The basic idea is that we walk down the tree to do that, finding the root 6493 // reduction instruction in InLoopReductionImmediateChains. From there we find 6494 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6495 // of the components. If the reduction cost is lower then we return it for the 6496 // reduction instruction and 0 for the other instructions in the pattern. If 6497 // it is not we return an invalid cost specifying the orignal cost method 6498 // should be used. 
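  // A concrete (hypothetical) shape of the first pattern, in scalar loop IR:
  //   %a.ext = sext i8 %a to i32
  //   %b.ext = sext i8 %b to i32
  //   %mul   = mul i32 %a.ext, %b.ext
  //   %sum   = add i32 %sum.phi, %mul    ; the in-loop add reduction
  // If the target reports a cheap extended-add reduction for the vectorized
  // form (e.g. a dot-product style operation), the whole chain is costed as
  // that one reduction and the ext/mul members of the chain are costed as 0.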
6499 Instruction *RetI = I; 6500 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6501 if (!RetI->hasOneUser()) 6502 return None; 6503 RetI = RetI->user_back(); 6504 } 6505 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6506 RetI->user_back()->getOpcode() == Instruction::Add) { 6507 if (!RetI->hasOneUser()) 6508 return None; 6509 RetI = RetI->user_back(); 6510 } 6511 6512 // Test if the found instruction is a reduction, and if not return an invalid 6513 // cost specifying the parent to use the original cost modelling. 6514 if (!InLoopReductionImmediateChains.count(RetI)) 6515 return None; 6516 6517 // Find the reduction this chain is a part of and calculate the basic cost of 6518 // the reduction on its own. 6519 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6520 Instruction *ReductionPhi = LastChain; 6521 while (!isa<PHINode>(ReductionPhi)) 6522 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6523 6524 const RecurrenceDescriptor &RdxDesc = 6525 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second; 6526 6527 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 6528 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 6529 6530 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 6531 // normal fmul instruction to the cost of the fadd reduction. 6532 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 6533 BaseCost += 6534 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 6535 6536 // If we're using ordered reductions then we can just return the base cost 6537 // here, since getArithmeticReductionCost calculates the full ordered 6538 // reduction cost when FP reassociation is not allowed. 6539 if (useOrderedReductions(RdxDesc)) 6540 return BaseCost; 6541 6542 // Get the operand that was not the reduction chain and match it to one of the 6543 // patterns, returning the better cost if it is found. 6544 Instruction *RedOp = RetI->getOperand(1) == LastChain 6545 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6546 : dyn_cast<Instruction>(RetI->getOperand(1)); 6547 6548 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6549 6550 Instruction *Op0, *Op1; 6551 if (RedOp && 6552 match(RedOp, 6553 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 6554 match(Op0, m_ZExtOrSExt(m_Value())) && 6555 Op0->getOpcode() == Op1->getOpcode() && 6556 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6557 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 6558 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 6559 6560 // Matched reduce(ext(mul(ext(A), ext(B))) 6561 // Note that the extend opcodes need to all match, or if A==B they will have 6562 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 6563 // which is equally fine. 
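    // The check a few lines below effectively asks whether one extended
    // multiply-add reduction is cheaper than costing the pieces separately:
    //   RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost
    // (the two inner extends are assumed to have the same cost, hence the
    // factor of two).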
6564 bool IsUnsigned = isa<ZExtInst>(Op0); 6565 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6566 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6567 6568 InstructionCost ExtCost = 6569 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6570 TTI::CastContextHint::None, CostKind, Op0); 6571 InstructionCost MulCost = 6572 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6573 InstructionCost Ext2Cost = 6574 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6575 TTI::CastContextHint::None, CostKind, RedOp); 6576 6577 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6578 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6579 CostKind); 6580 6581 if (RedCost.isValid() && 6582 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6583 return I == RetI ? RedCost : 0; 6584 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6585 !TheLoop->isLoopInvariant(RedOp)) { 6586 // Matched reduce(ext(A)) 6587 bool IsUnsigned = isa<ZExtInst>(RedOp); 6588 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6589 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6590 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6591 CostKind); 6592 6593 InstructionCost ExtCost = 6594 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6595 TTI::CastContextHint::None, CostKind, RedOp); 6596 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6597 return I == RetI ? RedCost : 0; 6598 } else if (RedOp && 6599 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6600 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6601 Op0->getOpcode() == Op1->getOpcode() && 6602 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6603 bool IsUnsigned = isa<ZExtInst>(Op0); 6604 Type *Op0Ty = Op0->getOperand(0)->getType(); 6605 Type *Op1Ty = Op1->getOperand(0)->getType(); 6606 Type *LargestOpTy = 6607 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6608 : Op0Ty; 6609 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6610 6611 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6612 // different sizes. We take the largest type as the ext to reduce, and add 6613 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6614 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6615 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6616 TTI::CastContextHint::None, CostKind, Op0); 6617 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6618 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6619 TTI::CastContextHint::None, CostKind, Op1); 6620 InstructionCost MulCost = 6621 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6622 6623 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6624 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6625 CostKind); 6626 InstructionCost ExtraExtCost = 0; 6627 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6628 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6629 ExtraExtCost = TTI.getCastInstrCost( 6630 ExtraExtOp->getOpcode(), ExtType, 6631 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6632 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6633 } 6634 6635 if (RedCost.isValid() && 6636 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6637 return I == RetI ? 
RedCost : 0; 6638 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6639 // Matched reduce(mul()) 6640 InstructionCost MulCost = 6641 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6642 6643 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6644 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6645 CostKind); 6646 6647 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6648 return I == RetI ? RedCost : 0; 6649 } 6650 } 6651 6652 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6653 } 6654 6655 InstructionCost 6656 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6657 ElementCount VF) { 6658 // Calculate scalar cost only. Vectorization cost should be ready at this 6659 // moment. 6660 if (VF.isScalar()) { 6661 Type *ValTy = getLoadStoreType(I); 6662 const Align Alignment = getLoadStoreAlignment(I); 6663 unsigned AS = getLoadStoreAddressSpace(I); 6664 6665 return TTI.getAddressComputationCost(ValTy) + 6666 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6667 TTI::TCK_RecipThroughput, I); 6668 } 6669 return getWideningCost(I, VF); 6670 } 6671 6672 LoopVectorizationCostModel::VectorizationCostTy 6673 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6674 ElementCount VF) { 6675 // If we know that this instruction will remain uniform, check the cost of 6676 // the scalar version. 6677 if (isUniformAfterVectorization(I, VF)) 6678 VF = ElementCount::getFixed(1); 6679 6680 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6681 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6682 6683 // Forced scalars do not have any scalarization overhead. 6684 auto ForcedScalar = ForcedScalars.find(VF); 6685 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6686 auto InstSet = ForcedScalar->second; 6687 if (InstSet.count(I)) 6688 return VectorizationCostTy( 6689 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6690 VF.getKnownMinValue()), 6691 false); 6692 } 6693 6694 Type *VectorTy; 6695 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6696 6697 bool TypeNotScalarized = false; 6698 if (VF.isVector() && VectorTy->isVectorTy()) { 6699 if (unsigned NumParts = TTI.getNumberOfParts(VectorTy)) { 6700 if (VF.isScalable()) 6701 // <vscale x 1 x iN> is assumed to be profitable over iN because 6702 // scalable registers are a distinct register class from scalar ones. 6703 // If we ever find a target which wants to lower scalable vectors 6704 // back to scalars, we'll need to update this code to explicitly 6705 // ask TTI about the register class uses for each part. 6706 TypeNotScalarized = NumParts <= VF.getKnownMinValue(); 6707 else 6708 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6709 } else 6710 C = InstructionCost::getInvalid(); 6711 } 6712 return VectorizationCostTy(C, TypeNotScalarized); 6713 } 6714 6715 InstructionCost 6716 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6717 ElementCount VF) const { 6718 6719 // There is no mechanism yet to create a scalable scalarization loop, 6720 // so this is currently Invalid. 
6721 if (VF.isScalable()) 6722 return InstructionCost::getInvalid(); 6723 6724 if (VF.isScalar()) 6725 return 0; 6726 6727 InstructionCost Cost = 0; 6728 Type *RetTy = ToVectorTy(I->getType(), VF); 6729 if (!RetTy->isVoidTy() && 6730 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6731 Cost += TTI.getScalarizationOverhead( 6732 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6733 false); 6734 6735 // Some targets keep addresses scalar. 6736 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6737 return Cost; 6738 6739 // Some targets support efficient element stores. 6740 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6741 return Cost; 6742 6743 // Collect operands to consider. 6744 CallInst *CI = dyn_cast<CallInst>(I); 6745 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 6746 6747 // Skip operands that do not require extraction/scalarization and do not incur 6748 // any overhead. 6749 SmallVector<Type *> Tys; 6750 for (auto *V : filterExtractingOperands(Ops, VF)) 6751 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6752 return Cost + TTI.getOperandsScalarizationOverhead( 6753 filterExtractingOperands(Ops, VF), Tys); 6754 } 6755 6756 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6757 if (VF.isScalar()) 6758 return; 6759 NumPredStores = 0; 6760 for (BasicBlock *BB : TheLoop->blocks()) { 6761 // For each instruction in the old loop. 6762 for (Instruction &I : *BB) { 6763 Value *Ptr = getLoadStorePointerOperand(&I); 6764 if (!Ptr) 6765 continue; 6766 6767 // TODO: We should generate better code and update the cost model for 6768 // predicated uniform stores. Today they are treated as any other 6769 // predicated store (see added test cases in 6770 // invariant-store-vectorization.ll). 6771 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6772 NumPredStores++; 6773 6774 if (Legal->isUniformMemOp(I)) { 6775 // TODO: Avoid replicating loads and stores instead of 6776 // relying on instcombine to remove them. 6777 // Load: Scalar load + broadcast 6778 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6779 InstructionCost Cost; 6780 if (isa<StoreInst>(&I) && VF.isScalable() && 6781 isLegalGatherOrScatter(&I, VF)) { 6782 Cost = getGatherScatterCost(&I, VF); 6783 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6784 } else { 6785 Cost = getUniformMemOpCost(&I, VF); 6786 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6787 } 6788 continue; 6789 } 6790 6791 // We assume that widening is the best solution when possible. 6792 if (memoryInstructionCanBeWidened(&I, VF)) { 6793 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6794 int ConsecutiveStride = Legal->isConsecutivePtr( 6795 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6796 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6797 "Expected consecutive stride."); 6798 InstWidening Decision = 6799 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6800 setWideningDecision(&I, VF, Decision, Cost); 6801 continue; 6802 } 6803 6804 // Choose between Interleaving, Gather/Scatter or Scalarization. 6805 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6806 unsigned NumAccesses = 1; 6807 if (isAccessInterleaved(&I)) { 6808 auto Group = getInterleavedAccessGroup(&I); 6809 assert(Group && "Fail to get an interleaved access group."); 6810 6811 // Make one decision for the whole group. 
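          // For example, in a factor-2 group {A[2*i], A[2*i+1]} the first
          // member visited decides (and is charged) for the whole group; when
          // the second member is reached its decision is already recorded and
          // the check below simply skips it.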
6812 if (getWideningDecision(&I, VF) != CM_Unknown) 6813 continue; 6814 6815 NumAccesses = Group->getNumMembers(); 6816 if (interleavedAccessCanBeWidened(&I, VF)) 6817 InterleaveCost = getInterleaveGroupCost(&I, VF); 6818 } 6819 6820 InstructionCost GatherScatterCost = 6821 isLegalGatherOrScatter(&I, VF) 6822 ? getGatherScatterCost(&I, VF) * NumAccesses 6823 : InstructionCost::getInvalid(); 6824 6825 InstructionCost ScalarizationCost = 6826 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6827 6828 // Choose better solution for the current VF, 6829 // write down this decision and use it during vectorization. 6830 InstructionCost Cost; 6831 InstWidening Decision; 6832 if (InterleaveCost <= GatherScatterCost && 6833 InterleaveCost < ScalarizationCost) { 6834 Decision = CM_Interleave; 6835 Cost = InterleaveCost; 6836 } else if (GatherScatterCost < ScalarizationCost) { 6837 Decision = CM_GatherScatter; 6838 Cost = GatherScatterCost; 6839 } else { 6840 Decision = CM_Scalarize; 6841 Cost = ScalarizationCost; 6842 } 6843 // If the instructions belongs to an interleave group, the whole group 6844 // receives the same decision. The whole group receives the cost, but 6845 // the cost will actually be assigned to one instruction. 6846 if (auto Group = getInterleavedAccessGroup(&I)) 6847 setWideningDecision(Group, VF, Decision, Cost); 6848 else 6849 setWideningDecision(&I, VF, Decision, Cost); 6850 } 6851 } 6852 6853 // Make sure that any load of address and any other address computation 6854 // remains scalar unless there is gather/scatter support. This avoids 6855 // inevitable extracts into address registers, and also has the benefit of 6856 // activating LSR more, since that pass can't optimize vectorized 6857 // addresses. 6858 if (TTI.prefersVectorizedAddressing()) 6859 return; 6860 6861 // Start with all scalar pointer uses. 6862 SmallPtrSet<Instruction *, 8> AddrDefs; 6863 for (BasicBlock *BB : TheLoop->blocks()) 6864 for (Instruction &I : *BB) { 6865 Instruction *PtrDef = 6866 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6867 if (PtrDef && TheLoop->contains(PtrDef) && 6868 getWideningDecision(&I, VF) != CM_GatherScatter) 6869 AddrDefs.insert(PtrDef); 6870 } 6871 6872 // Add all instructions used to generate the addresses. 6873 SmallVector<Instruction *, 4> Worklist; 6874 append_range(Worklist, AddrDefs); 6875 while (!Worklist.empty()) { 6876 Instruction *I = Worklist.pop_back_val(); 6877 for (auto &Op : I->operands()) 6878 if (auto *InstOp = dyn_cast<Instruction>(Op)) 6879 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 6880 AddrDefs.insert(InstOp).second) 6881 Worklist.push_back(InstOp); 6882 } 6883 6884 for (auto *I : AddrDefs) { 6885 if (isa<LoadInst>(I)) { 6886 // Setting the desired widening decision should ideally be handled in 6887 // by cost functions, but since this involves the task of finding out 6888 // if the loaded register is involved in an address computation, it is 6889 // instead changed here when we know this is the case. 6890 InstWidening Decision = getWideningDecision(I, VF); 6891 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 6892 // Scalarize a widened load of address. 6893 setWideningDecision( 6894 I, VF, CM_Scalarize, 6895 (VF.getKnownMinValue() * 6896 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 6897 else if (auto Group = getInterleavedAccessGroup(I)) { 6898 // Scalarize an interleave group of address loads. 
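        // Each member of the group that is present is re-costed as VF scalar
        // loads, e.g. a fully-populated factor-3 group yields three such
        // scalarized decisions (illustrative; gaps in the group are skipped).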
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void) hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result, we
    // don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return cost for branches around scalarized and predicated blocks.
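      // The cost modelled below is one extract per lane from the <VF x i1>
      // compare (the scalarization overhead of Vec_i1Ty with Insert=false,
      // Extract=true) plus one scalar branch per lane.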
6978 auto *Vec_i1Ty = 6979 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 6980 return ( 6981 TTI.getScalarizationOverhead( 6982 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 6983 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 6984 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 6985 // The back-edge branch will remain, as will all scalar branches. 6986 return TTI.getCFInstrCost(Instruction::Br, CostKind); 6987 else 6988 // This branch will be eliminated by if-conversion. 6989 return 0; 6990 // Note: We currently assume zero cost for an unconditional branch inside 6991 // a predicated block since it will become a fall-through, although we 6992 // may decide in the future to call TTI for all branches. 6993 } 6994 case Instruction::PHI: { 6995 auto *Phi = cast<PHINode>(I); 6996 6997 // First-order recurrences are replaced by vector shuffles inside the loop. 6998 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 6999 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7000 return TTI.getShuffleCost( 7001 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7002 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7003 7004 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7005 // converted into select instructions. We require N - 1 selects per phi 7006 // node, where N is the number of incoming values. 7007 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7008 return (Phi->getNumIncomingValues() - 1) * 7009 TTI.getCmpSelInstrCost( 7010 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7011 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7012 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7013 7014 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7015 } 7016 case Instruction::UDiv: 7017 case Instruction::SDiv: 7018 case Instruction::URem: 7019 case Instruction::SRem: 7020 // If we have a predicated instruction, it may not be executed for each 7021 // vector lane. Get the scalarization cost and scale this amount by the 7022 // probability of executing the predicated block. If the instruction is not 7023 // predicated, we fall through to the next case. 7024 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7025 InstructionCost Cost = 0; 7026 7027 // These instructions have a non-void type, so account for the phi nodes 7028 // that we will create. This cost is likely to be zero. The phi node 7029 // cost, if any, should be scaled by the block probability because it 7030 // models a copy at the end of each predicated block. 7031 Cost += VF.getKnownMinValue() * 7032 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7033 7034 // The cost of the non-predicated instruction. 7035 Cost += VF.getKnownMinValue() * 7036 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7037 7038 // The cost of insertelement and extractelement instructions needed for 7039 // scalarization. 7040 Cost += getScalarizationOverhead(I, VF); 7041 7042 // Scale the cost by the probability of executing the predicated blocks. 7043 // This assumes the predicated block for each vector lane is equally 7044 // likely. 
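      // For example, if the reciprocal of the block-execution probability is 2
      // (i.e. each predicated block is assumed to execute half the time), the
      // scalarized cost computed above is simply halved.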
7045 return Cost / getReciprocalPredBlockProb(); 7046 } 7047 LLVM_FALLTHROUGH; 7048 case Instruction::Add: 7049 case Instruction::FAdd: 7050 case Instruction::Sub: 7051 case Instruction::FSub: 7052 case Instruction::Mul: 7053 case Instruction::FMul: 7054 case Instruction::FDiv: 7055 case Instruction::FRem: 7056 case Instruction::Shl: 7057 case Instruction::LShr: 7058 case Instruction::AShr: 7059 case Instruction::And: 7060 case Instruction::Or: 7061 case Instruction::Xor: { 7062 // Since we will replace the stride by 1 the multiplication should go away. 7063 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7064 return 0; 7065 7066 // Detect reduction patterns 7067 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7068 return *RedCost; 7069 7070 // Certain instructions can be cheaper to vectorize if they have a constant 7071 // second vector operand. One example of this are shifts on x86. 7072 Value *Op2 = I->getOperand(1); 7073 TargetTransformInfo::OperandValueProperties Op2VP; 7074 TargetTransformInfo::OperandValueKind Op2VK = 7075 TTI.getOperandInfo(Op2, Op2VP); 7076 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7077 Op2VK = TargetTransformInfo::OK_UniformValue; 7078 7079 SmallVector<const Value *, 4> Operands(I->operand_values()); 7080 return TTI.getArithmeticInstrCost( 7081 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7082 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7083 } 7084 case Instruction::FNeg: { 7085 return TTI.getArithmeticInstrCost( 7086 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7087 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7088 TargetTransformInfo::OP_None, I->getOperand(0), I); 7089 } 7090 case Instruction::Select: { 7091 SelectInst *SI = cast<SelectInst>(I); 7092 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7093 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7094 7095 const Value *Op0, *Op1; 7096 using namespace llvm::PatternMatch; 7097 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7098 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7099 // select x, y, false --> x & y 7100 // select x, true, y --> x | y 7101 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7102 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7103 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7104 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7105 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7106 Op1->getType()->getScalarSizeInBits() == 1); 7107 7108 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7109 return TTI.getArithmeticInstrCost( 7110 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7111 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7112 } 7113 7114 Type *CondTy = SI->getCondition()->getType(); 7115 if (!ScalarCond) 7116 CondTy = VectorType::get(CondTy, VF); 7117 7118 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7119 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7120 Pred = Cmp->getPredicate(); 7121 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7122 CostKind, I); 7123 } 7124 case Instruction::ICmp: 7125 case Instruction::FCmp: { 7126 Type *ValTy = I->getOperand(0)->getType(); 7127 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7128 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7129 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7130 VectorTy = ToVectorTy(ValTy, VF); 7131 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7132 cast<CmpInst>(I)->getPredicate(), CostKind, 7133 I); 7134 } 7135 case Instruction::Store: 7136 case Instruction::Load: { 7137 ElementCount Width = VF; 7138 if (Width.isVector()) { 7139 InstWidening Decision = getWideningDecision(I, Width); 7140 assert(Decision != CM_Unknown && 7141 "CM decision should be taken at this point"); 7142 if (Decision == CM_Scalarize) { 7143 if (VF.isScalable() && isa<StoreInst>(I)) 7144 // We can't scalarize a scalable vector store (even a uniform one 7145 // currently), return an invalid cost so as to prevent vectorization. 7146 return InstructionCost::getInvalid(); 7147 Width = ElementCount::getFixed(1); 7148 } 7149 } 7150 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7151 return getMemoryInstructionCost(I, VF); 7152 } 7153 case Instruction::BitCast: 7154 if (I->getType()->isPointerTy()) 7155 return 0; 7156 LLVM_FALLTHROUGH; 7157 case Instruction::ZExt: 7158 case Instruction::SExt: 7159 case Instruction::FPToUI: 7160 case Instruction::FPToSI: 7161 case Instruction::FPExt: 7162 case Instruction::PtrToInt: 7163 case Instruction::IntToPtr: 7164 case Instruction::SIToFP: 7165 case Instruction::UIToFP: 7166 case Instruction::Trunc: 7167 case Instruction::FPTrunc: { 7168 // Computes the CastContextHint from a Load/Store instruction. 7169 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7170 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7171 "Expected a load or a store!"); 7172 7173 if (VF.isScalar() || !TheLoop->contains(I)) 7174 return TTI::CastContextHint::Normal; 7175 7176 switch (getWideningDecision(I, VF)) { 7177 case LoopVectorizationCostModel::CM_GatherScatter: 7178 return TTI::CastContextHint::GatherScatter; 7179 case LoopVectorizationCostModel::CM_Interleave: 7180 return TTI::CastContextHint::Interleave; 7181 case LoopVectorizationCostModel::CM_Scalarize: 7182 case LoopVectorizationCostModel::CM_Widen: 7183 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7184 : TTI::CastContextHint::Normal; 7185 case LoopVectorizationCostModel::CM_Widen_Reverse: 7186 return TTI::CastContextHint::Reversed; 7187 case LoopVectorizationCostModel::CM_Unknown: 7188 llvm_unreachable("Instr did not go through cost modelling?"); 7189 } 7190 7191 llvm_unreachable("Unhandled case!"); 7192 }; 7193 7194 unsigned Opcode = I->getOpcode(); 7195 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7196 // For Trunc, the context is the only user, which must be a StoreInst. 
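    // For example, a trunc whose single use is a consecutive store is costed
    // with CastContextHint::Normal (or ::Masked if the store needs a mask),
    // while one feeding a reverse-consecutive store gets ::Reversed; see
    // ComputeCCH below.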
7197 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7198 if (I->hasOneUse()) 7199 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7200 CCH = ComputeCCH(Store); 7201 } 7202 // For Z/Sext, the context is the operand, which must be a LoadInst. 7203 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7204 Opcode == Instruction::FPExt) { 7205 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7206 CCH = ComputeCCH(Load); 7207 } 7208 7209 // We optimize the truncation of induction variables having constant 7210 // integer steps. The cost of these truncations is the same as the scalar 7211 // operation. 7212 if (isOptimizableIVTruncate(I, VF)) { 7213 auto *Trunc = cast<TruncInst>(I); 7214 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7215 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7216 } 7217 7218 // Detect reduction patterns 7219 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7220 return *RedCost; 7221 7222 Type *SrcScalarTy = I->getOperand(0)->getType(); 7223 Type *SrcVecTy = 7224 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7225 if (canTruncateToMinimalBitwidth(I, VF)) { 7226 // This cast is going to be shrunk. This may remove the cast or it might 7227 // turn it into slightly different cast. For example, if MinBW == 16, 7228 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7229 // 7230 // Calculate the modified src and dest types. 7231 Type *MinVecTy = VectorTy; 7232 if (Opcode == Instruction::Trunc) { 7233 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7234 VectorTy = 7235 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7236 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7237 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7238 VectorTy = 7239 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7240 } 7241 } 7242 7243 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7244 } 7245 case Instruction::Call: { 7246 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7247 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7248 return *RedCost; 7249 bool NeedToScalarize; 7250 CallInst *CI = cast<CallInst>(I); 7251 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7252 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7253 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7254 return std::min(CallCost, IntrinsicCost); 7255 } 7256 return CallCost; 7257 } 7258 case Instruction::ExtractValue: 7259 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7260 case Instruction::Alloca: 7261 // We cannot easily widen alloca to a scalable alloca, as 7262 // the result would need to be a vector of pointers. 7263 if (VF.isScalable()) 7264 return InstructionCost::getInvalid(); 7265 LLVM_FALLTHROUGH; 7266 default: 7267 // This opcode is unknown. Assume that it is the same as 'mul'. 7268 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7269 } // end of switch. 
7270 } 7271 7272 char LoopVectorize::ID = 0; 7273 7274 static const char lv_name[] = "Loop Vectorization"; 7275 7276 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7277 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7278 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7279 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7280 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7281 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7282 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7283 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7284 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7285 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7286 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7287 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7288 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7289 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7290 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7291 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7292 7293 namespace llvm { 7294 7295 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7296 7297 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7298 bool VectorizeOnlyWhenForced) { 7299 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7300 } 7301 7302 } // end namespace llvm 7303 7304 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7305 // Check if the pointer operand of a load or store instruction is 7306 // consecutive. 7307 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7308 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7309 return false; 7310 } 7311 7312 void LoopVectorizationCostModel::collectValuesToIgnore() { 7313 // Ignore ephemeral values. 7314 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7315 7316 // Find all stores to invariant variables. Since they are going to sink 7317 // outside the loop we do not need calculate cost for them. 7318 for (BasicBlock *BB : TheLoop->blocks()) 7319 for (Instruction &I : *BB) { 7320 StoreInst *SI; 7321 if ((SI = dyn_cast<StoreInst>(&I)) && 7322 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) 7323 ValuesToIgnore.insert(&I); 7324 } 7325 7326 // Ignore type-promoting instructions we identified during reduction 7327 // detection. 7328 for (auto &Reduction : Legal->getReductionVars()) { 7329 const RecurrenceDescriptor &RedDes = Reduction.second; 7330 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7331 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7332 } 7333 // Ignore type-casting instructions we identified during induction 7334 // detection. 7335 for (auto &Induction : Legal->getInductionVars()) { 7336 const InductionDescriptor &IndDes = Induction.second; 7337 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7338 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7339 } 7340 } 7341 7342 void LoopVectorizationCostModel::collectInLoopReductions() { 7343 for (auto &Reduction : Legal->getReductionVars()) { 7344 PHINode *Phi = Reduction.first; 7345 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7346 7347 // We don't collect reductions that are type promoted (yet). 7348 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7349 continue; 7350 7351 // If the target would prefer this reduction to happen "in-loop", then we 7352 // want to record it as such. 
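    // Roughly, an "in-loop" reduction reduces the vector operands on every
    // iteration and accumulates into a scalar chain, instead of keeping a wide
    // vector accumulator that is reduced once after the loop; some targets
    // prefer this, and ordered (strict FP) reductions are always handled this
    // way.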
7353 unsigned Opcode = RdxDesc.getOpcode(); 7354 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7355 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7356 TargetTransformInfo::ReductionFlags())) 7357 continue; 7358 7359 // Check that we can correctly put the reductions into the loop, by 7360 // finding the chain of operations that leads from the phi to the loop 7361 // exit value. 7362 SmallVector<Instruction *, 4> ReductionOperations = 7363 RdxDesc.getReductionOpChain(Phi, TheLoop); 7364 bool InLoop = !ReductionOperations.empty(); 7365 if (InLoop) { 7366 InLoopReductionChains[Phi] = ReductionOperations; 7367 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7368 Instruction *LastChain = Phi; 7369 for (auto *I : ReductionOperations) { 7370 InLoopReductionImmediateChains[I] = LastChain; 7371 LastChain = I; 7372 } 7373 } 7374 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7375 << " reduction for phi: " << *Phi << "\n"); 7376 } 7377 } 7378 7379 // TODO: we could return a pair of values that specify the max VF and 7380 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7381 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7382 // doesn't have a cost model that can choose which plan to execute if 7383 // more than one is generated. 7384 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7385 LoopVectorizationCostModel &CM) { 7386 unsigned WidestType; 7387 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7388 return WidestVectorRegBits / WidestType; 7389 } 7390 7391 VectorizationFactor 7392 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7393 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7394 ElementCount VF = UserVF; 7395 // Outer loop handling: They may require CFG and instruction level 7396 // transformations before even evaluating whether vectorization is profitable. 7397 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7398 // the vectorization pipeline. 7399 if (!OrigLoop->isInnermost()) { 7400 // If the user doesn't provide a vectorization factor, determine a 7401 // reasonable one. 7402 if (UserVF.isZero()) { 7403 VF = ElementCount::getFixed(determineVPlanVF( 7404 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7405 .getFixedSize(), 7406 CM)); 7407 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7408 7409 // Make sure we have a VF > 1 for stress testing. 7410 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7411 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7412 << "overriding computed VF.\n"); 7413 VF = ElementCount::getFixed(4); 7414 } 7415 } 7416 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7417 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7418 "VF needs to be a power of two"); 7419 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7420 << "VF " << VF << " to build VPlans.\n"); 7421 buildVPlans(VF, VF); 7422 7423 // For VPlan build stress testing, we bail out after VPlan construction. 7424 if (VPlanBuildStressTest) 7425 return VectorizationFactor::Disabled(); 7426 7427 return {VF, 0 /*Cost*/, 0 /* ScalarCost */}; 7428 } 7429 7430 LLVM_DEBUG( 7431 dbgs() << "LV: Not vectorizing. 
      Inner loops aren't supported in the "
      "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
  if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all
      // decisions based on them, which includes widening decisions and
      // uniform and scalar values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxUserVF =
      UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
  if (!UserVF.isZero() && UserVFIsLegal) {
    assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (CM.selectUserVectorizationFactor(UserVF)) {
      LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
      CM.collectInLoopReductions();
      buildVPlansWithVPRecipes(UserVF, UserVF);
      LLVM_DEBUG(printPlans(dbgs()));
      return {{UserVF, 0, 0}};
    } else
      reportVectorizationInfo("UserVF ignored because of invalid costs.",
                              "InvalidCost", ORE, OrigLoop);
  }

  // Populate the set of Vectorization Factor Candidates.
  ElementCountSet VFCandidates;
  for (auto VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
    VFCandidates.insert(VF);
  for (auto VF = ElementCount::getScalable(1);
       ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
    VFCandidates.insert(VF);

  for (const auto &VF : VFCandidates) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();
  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
  buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);

  LLVM_DEBUG(printPlans(dbgs()));
  if (!MaxFactors.hasVector())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
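  // VFCandidates holds every power-of-two fixed VF up to MaxFactors.FixedVF
  // and every power-of-two scalable VF up to MaxFactors.ScalableVF; the cost
  // model picks the most profitable of these (which may be the scalar VF of 1).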
7504 VectorizationFactor VF = CM.selectVectorizationFactor(VFCandidates); 7505 assert((VF.Width.isScalar() || VF.ScalarCost > 0) && "when vectorizing, the scalar cost must be non-zero."); 7506 return VF; 7507 } 7508 7509 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7510 assert(count_if(VPlans, 7511 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7512 1 && 7513 "Best VF has not a single VPlan."); 7514 7515 for (const VPlanPtr &Plan : VPlans) { 7516 if (Plan->hasVF(VF)) 7517 return *Plan.get(); 7518 } 7519 llvm_unreachable("No plan found!"); 7520 } 7521 7522 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7523 SmallVector<Metadata *, 4> MDs; 7524 // Reserve first location for self reference to the LoopID metadata node. 7525 MDs.push_back(nullptr); 7526 bool IsUnrollMetadata = false; 7527 MDNode *LoopID = L->getLoopID(); 7528 if (LoopID) { 7529 // First find existing loop unrolling disable metadata. 7530 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7531 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7532 if (MD) { 7533 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7534 IsUnrollMetadata = 7535 S && S->getString().startswith("llvm.loop.unroll.disable"); 7536 } 7537 MDs.push_back(LoopID->getOperand(i)); 7538 } 7539 } 7540 7541 if (!IsUnrollMetadata) { 7542 // Add runtime unroll disable metadata. 7543 LLVMContext &Context = L->getHeader()->getContext(); 7544 SmallVector<Metadata *, 1> DisableOperands; 7545 DisableOperands.push_back( 7546 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7547 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7548 MDs.push_back(DisableNode); 7549 MDNode *NewLoopID = MDNode::get(Context, MDs); 7550 // Set operand 0 to refer to the loop id itself. 7551 NewLoopID->replaceOperandWith(0, NewLoopID); 7552 L->setLoopID(NewLoopID); 7553 } 7554 } 7555 7556 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7557 VPlan &BestVPlan, 7558 InnerLoopVectorizer &ILV, 7559 DominatorTree *DT, 7560 bool IsEpilogueVectorization) { 7561 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7562 << '\n'); 7563 7564 // Perform the actual loop transformation. 7565 7566 // 1. Set up the skeleton for vectorization, including vector pre-header and 7567 // middle block. The vector loop is created during VPlan execution. 7568 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7569 Value *CanonicalIVStartValue; 7570 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) = 7571 ILV.createVectorizedLoopSkeleton(); 7572 7573 // Only use noalias metadata when using memory checks guaranteeing no overlap 7574 // across all iterations. 7575 const LoopAccessInfo *LAI = ILV.Legal->getLAI(); 7576 if (LAI && !LAI->getRuntimePointerChecking()->getChecks().empty() && 7577 !LAI->getRuntimePointerChecking()->getDiffChecks()) { 7578 7579 // We currently don't use LoopVersioning for the actual loop cloning but we 7580 // still use it to add the noalias metadata. 7581 // TODO: Find a better way to re-use LoopVersioning functionality to add 7582 // metadata. 
7583 State.LVer = std::make_unique<LoopVersioning>( 7584 *LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, DT, 7585 PSE.getSE()); 7586 State.LVer->prepareNoAliasMetadata(); 7587 } 7588 7589 ILV.collectPoisonGeneratingRecipes(State); 7590 7591 ILV.printDebugTracesAtStart(); 7592 7593 //===------------------------------------------------===// 7594 // 7595 // Notice: any optimization or new instruction that go 7596 // into the code below should also be implemented in 7597 // the cost-model. 7598 // 7599 //===------------------------------------------------===// 7600 7601 // 2. Copy and widen instructions from the old loop into the new loop. 7602 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), 7603 ILV.getOrCreateVectorTripCount(nullptr), 7604 CanonicalIVStartValue, State, 7605 IsEpilogueVectorization); 7606 7607 BestVPlan.execute(&State); 7608 7609 // Keep all loop hints from the original loop on the vector loop (we'll 7610 // replace the vectorizer-specific hints below). 7611 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7612 7613 Optional<MDNode *> VectorizedLoopID = 7614 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7615 LLVMLoopVectorizeFollowupVectorized}); 7616 7617 VPBasicBlock *HeaderVPBB = 7618 BestVPlan.getVectorLoopRegion()->getEntryBasicBlock(); 7619 Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]); 7620 if (VectorizedLoopID) 7621 L->setLoopID(VectorizedLoopID.value()); 7622 else { 7623 // Keep all loop hints from the original loop on the vector loop (we'll 7624 // replace the vectorizer-specific hints below). 7625 if (MDNode *LID = OrigLoop->getLoopID()) 7626 L->setLoopID(LID); 7627 7628 LoopVectorizeHints Hints(L, true, *ORE); 7629 Hints.setAlreadyVectorized(); 7630 } 7631 // Disable runtime unrolling when vectorizing the epilogue loop. 7632 if (CanonicalIVStartValue) 7633 AddRuntimeUnrollDisableMetaData(L); 7634 7635 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7636 // predication, updating analyses. 7637 ILV.fixVectorizedLoop(State, BestVPlan); 7638 7639 ILV.printDebugTracesAtEnd(); 7640 } 7641 7642 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 7643 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 7644 for (const auto &Plan : VPlans) 7645 if (PrintVPlansInDotFormat) 7646 Plan->printDOT(O); 7647 else 7648 Plan->print(O); 7649 } 7650 #endif 7651 7652 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7653 7654 //===--------------------------------------------------------------------===// 7655 // EpilogueVectorizerMainLoop 7656 //===--------------------------------------------------------------------===// 7657 7658 /// This function is partially responsible for generating the control flow 7659 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7660 std::pair<BasicBlock *, Value *> 7661 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7662 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7663 7664 // Workaround! Compute the trip count of the original loop and cache it 7665 // before we start modifying the CFG. This code has a systemic problem 7666 // wherein it tries to run analysis over partially constructed IR; this is 7667 // wrong, and not simply for SCEV. The trip count of the original loop 7668 // simply happens to be prone to hitting this in practice. In theory, we 7669 // can hit the same issue for any SCEV, or ValueTracking query done during 7670 // mutation. See PR49900. 
7671 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 7672 createVectorLoopSkeleton(""); 7673 7674 // Generate the code to check the minimum iteration count of the vector 7675 // epilogue (see below). 7676 EPI.EpilogueIterationCountCheck = 7677 emitIterationCountCheck(LoopScalarPreHeader, true); 7678 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7679 7680 // Generate the code to check any assumptions that we've made for SCEV 7681 // expressions. 7682 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7683 7684 // Generate the code that checks at runtime if arrays overlap. We put the 7685 // checks into a separate block to make the more common case of few elements 7686 // faster. 7687 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7688 7689 // Generate the iteration count check for the main loop, *after* the check 7690 // for the epilogue loop, so that the path-length is shorter for the case 7691 // that goes directly through the vector epilogue. The longer-path length for 7692 // the main loop is compensated for, by the gain from vectorizing the larger 7693 // trip count. Note: the branch will get updated later on when we vectorize 7694 // the epilogue. 7695 EPI.MainLoopIterationCountCheck = 7696 emitIterationCountCheck(LoopScalarPreHeader, false); 7697 7698 // Generate the induction variable. 7699 EPI.VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 7700 7701 // Skip induction resume value creation here because they will be created in 7702 // the second pass. If we created them here, they wouldn't be used anyway, 7703 // because the vplan in the second pass still contains the inductions from the 7704 // original loop. 7705 7706 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7707 } 7708 7709 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7710 LLVM_DEBUG({ 7711 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7712 << "Main Loop VF:" << EPI.MainLoopVF 7713 << ", Main Loop UF:" << EPI.MainLoopUF 7714 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7715 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7716 }); 7717 } 7718 7719 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7720 DEBUG_WITH_TYPE(VerboseDebug, { 7721 dbgs() << "intermediate fn:\n" 7722 << *OrigLoop->getHeader()->getParent() << "\n"; 7723 }); 7724 } 7725 7726 BasicBlock * 7727 EpilogueVectorizerMainLoop::emitIterationCountCheck(BasicBlock *Bypass, 7728 bool ForEpilogue) { 7729 assert(Bypass && "Expected valid bypass basic block."); 7730 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 7731 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7732 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7733 // Reuse existing vector loop preheader for TC checks. 7734 // Note that new preheader block is generated for vector loop. 7735 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7736 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7737 7738 // Generate code to check if the loop's trip count is less than VF * UF of the 7739 // main vector loop. 7740 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7741 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7742 7743 Value *CheckMinIters = Builder.CreateICmp( 7744 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7745 "min.iters.check"); 7746 7747 if (!ForEpilogue) 7748 TCCheckBlock->setName("vector.main.loop.iter.check"); 7749 7750 // Create new preheader for vector loop. 
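  // A rough sketch of the check block once the conditional branch below is in
  // place (block and value names are illustrative):
  //   vector.main.loop.iter.check:
  //     %min.iters.check = icmp ult i64 %count, (VF * UF) ; ule if a scalar
  //                                                       ; epilogue is required
  //     br i1 %min.iters.check, label %bypass, label %vector.ph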
7751 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7752 DT, LI, nullptr, "vector.ph"); 7753 7754 if (ForEpilogue) { 7755 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7756 DT->getNode(Bypass)->getIDom()) && 7757 "TC check is expected to dominate Bypass"); 7758 7759 // Update dominator for Bypass & LoopExit. 7760 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7761 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7762 // For loops with multiple exits, there's no edge from the middle block 7763 // to exit blocks (as the epilogue must run) and thus no need to update 7764 // the immediate dominator of the exit blocks. 7765 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7766 7767 LoopBypassBlocks.push_back(TCCheckBlock); 7768 7769 // Save the trip count so we don't have to regenerate it in the 7770 // vec.epilog.iter.check. This is safe to do because the trip count 7771 // generated here dominates the vector epilog iter check. 7772 EPI.TripCount = Count; 7773 } 7774 7775 ReplaceInstWithInst( 7776 TCCheckBlock->getTerminator(), 7777 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7778 7779 return TCCheckBlock; 7780 } 7781 7782 //===--------------------------------------------------------------------===// 7783 // EpilogueVectorizerEpilogueLoop 7784 //===--------------------------------------------------------------------===// 7785 7786 /// This function is partially responsible for generating the control flow 7787 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7788 std::pair<BasicBlock *, Value *> 7789 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7790 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7791 createVectorLoopSkeleton("vec.epilog."); 7792 7793 // Now, compare the remaining count and if there aren't enough iterations to 7794 // execute the vectorized epilogue skip to the scalar part. 7795 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7796 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7797 LoopVectorPreHeader = 7798 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7799 LI, nullptr, "vec.epilog.ph"); 7800 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7801 VecEpilogueIterationCountCheck); 7802 7803 // Adjust the control flow taking the state info from the main loop 7804 // vectorization into account. 
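  // Roughly: the main loop's minimum-iteration check now branches straight to
  // the new epilogue vector preheader (skipping the epilogue iteration-count
  // check), while the epilogue iteration-count check and the SCEV/memory
  // runtime checks emitted in the first pass are re-pointed at the scalar
  // preheader.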
7805 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7806 "expected this to be saved from the previous pass."); 7807 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7808 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7809 7810 DT->changeImmediateDominator(LoopVectorPreHeader, 7811 EPI.MainLoopIterationCountCheck); 7812 7813 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7814 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7815 7816 if (EPI.SCEVSafetyCheck) 7817 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7818 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7819 if (EPI.MemSafetyCheck) 7820 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7821 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7822 7823 DT->changeImmediateDominator( 7824 VecEpilogueIterationCountCheck, 7825 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7826 7827 DT->changeImmediateDominator(LoopScalarPreHeader, 7828 EPI.EpilogueIterationCountCheck); 7829 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7830 // If there is an epilogue which must run, there's no edge from the 7831 // middle block to exit blocks and thus no need to update the immediate 7832 // dominator of the exit blocks. 7833 DT->changeImmediateDominator(LoopExitBlock, 7834 EPI.EpilogueIterationCountCheck); 7835 7836 // Keep track of bypass blocks, as they feed start values to the induction 7837 // phis in the scalar loop preheader. 7838 if (EPI.SCEVSafetyCheck) 7839 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7840 if (EPI.MemSafetyCheck) 7841 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7842 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7843 7844 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7845 // merge control-flow from the latch block and the middle block. Update the 7846 // incoming values here and move the Phi into the preheader. 7847 SmallVector<PHINode *, 4> PhisInBlock; 7848 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis()) 7849 PhisInBlock.push_back(&Phi); 7850 7851 for (PHINode *Phi : PhisInBlock) { 7852 Phi->replaceIncomingBlockWith( 7853 VecEpilogueIterationCountCheck->getSinglePredecessor(), 7854 VecEpilogueIterationCountCheck); 7855 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); 7856 if (EPI.SCEVSafetyCheck) 7857 Phi->removeIncomingValue(EPI.SCEVSafetyCheck); 7858 if (EPI.MemSafetyCheck) 7859 Phi->removeIncomingValue(EPI.MemSafetyCheck); 7860 Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI()); 7861 } 7862 7863 // Generate a resume induction for the vector epilogue and put it in the 7864 // vector epilogue preheader 7865 Type *IdxTy = Legal->getWidestInductionType(); 7866 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 7867 LoopVectorPreHeader->getFirstNonPHI()); 7868 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 7869 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 7870 EPI.MainLoopIterationCountCheck); 7871 7872 // Generate induction resume values. These variables save the new starting 7873 // indexes for the scalar loop. They are used to test if there are any tail 7874 // iterations left once the vector loop has completed. 
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence the AdditionalBypass argument.
  createInductionResumeValues({VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
      ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters =
      Builder.CreateICmp(P, Count,
                         createStepForVF(Builder, Count->getType(),
                                         EPI.EpilogueVF, EPI.EpilogueUF),
                         "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VFs = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VFs starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
7952 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 7953 ElementCount MaxVF) { 7954 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 7955 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 7956 VFRange SubRange = {VF, MaxVFPlusOne}; 7957 VPlans.push_back(buildVPlan(SubRange)); 7958 VF = SubRange.End; 7959 } 7960 } 7961 7962 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 7963 VPlanPtr &Plan) { 7964 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 7965 7966 // Look for cached value. 7967 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 7968 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 7969 if (ECEntryIt != EdgeMaskCache.end()) 7970 return ECEntryIt->second; 7971 7972 VPValue *SrcMask = createBlockInMask(Src, Plan); 7973 7974 // The terminator has to be a branch inst! 7975 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 7976 assert(BI && "Unexpected terminator found"); 7977 7978 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 7979 return EdgeMaskCache[Edge] = SrcMask; 7980 7981 // If source is an exiting block, we know the exit edge is dynamically dead 7982 // in the vector loop, and thus we don't need to restrict the mask. Avoid 7983 // adding uses of an otherwise potentially dead instruction. 7984 if (OrigLoop->isLoopExiting(Src)) 7985 return EdgeMaskCache[Edge] = SrcMask; 7986 7987 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 7988 assert(EdgeMask && "No Edge Mask found for condition"); 7989 7990 if (BI->getSuccessor(0) != Dst) 7991 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 7992 7993 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 7994 // The condition is 'SrcMask && EdgeMask', which is equivalent to 7995 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 7996 // The select version does not introduce new UB if SrcMask is false and 7997 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 7998 VPValue *False = Plan->getOrAddVPValue( 7999 ConstantInt::getFalse(BI->getCondition()->getType())); 8000 EdgeMask = 8001 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8002 } 8003 8004 return EdgeMaskCache[Edge] = EdgeMask; 8005 } 8006 8007 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8008 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8009 8010 // Look for cached value. 8011 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8012 if (BCEntryIt != BlockMaskCache.end()) 8013 return BCEntryIt->second; 8014 8015 // All-one mask is modelled as no-mask following the convention for masked 8016 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8017 VPValue *BlockMask = nullptr; 8018 8019 if (OrigLoop->getHeader() == BB) { 8020 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8021 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8022 8023 assert(CM.foldTailByMasking() && "must fold the tail"); 8024 8025 // If we're using the active lane mask for control flow, then we get the 8026 // mask from the active lane mask PHI that is cached in the VPlan. 8027 PredicationStyle EmitGetActiveLaneMask = CM.TTI.emitGetActiveLaneMask(); 8028 if (EmitGetActiveLaneMask == PredicationStyle::DataAndControlFlow) 8029 return BlockMaskCache[BB] = Plan->getActiveLaneMaskPhi(); 8030 8031 // Introduce the early-exit compare IV <= BTC to form header block mask. 
8032 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8033 // constructing the desired canonical IV in the header block as its first 8034 // non-phi instructions. 8035 8036 VPBasicBlock *HeaderVPBB = 8037 Plan->getVectorLoopRegion()->getEntryBasicBlock(); 8038 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8039 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8040 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8041 8042 VPBuilder::InsertPointGuard Guard(Builder); 8043 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8044 if (EmitGetActiveLaneMask != PredicationStyle::None) { 8045 VPValue *TC = Plan->getOrCreateTripCount(); 8046 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}, 8047 nullptr, "active.lane.mask"); 8048 } else { 8049 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8050 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8051 } 8052 return BlockMaskCache[BB] = BlockMask; 8053 } 8054 8055 // This is the block mask. We OR all incoming edges. 8056 for (auto *Predecessor : predecessors(BB)) { 8057 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8058 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8059 return BlockMaskCache[BB] = EdgeMask; 8060 8061 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8062 BlockMask = EdgeMask; 8063 continue; 8064 } 8065 8066 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8067 } 8068 8069 return BlockMaskCache[BB] = BlockMask; 8070 } 8071 8072 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8073 ArrayRef<VPValue *> Operands, 8074 VFRange &Range, 8075 VPlanPtr &Plan) { 8076 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8077 "Must be called with either a load or store"); 8078 8079 auto willWiden = [&](ElementCount VF) -> bool { 8080 LoopVectorizationCostModel::InstWidening Decision = 8081 CM.getWideningDecision(I, VF); 8082 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8083 "CM decision should be taken at this point."); 8084 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8085 return true; 8086 if (CM.isScalarAfterVectorization(I, VF) || 8087 CM.isProfitableToScalarize(I, VF)) 8088 return false; 8089 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8090 }; 8091 8092 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8093 return nullptr; 8094 8095 VPValue *Mask = nullptr; 8096 if (Legal->isMaskRequired(I)) 8097 Mask = createBlockInMask(I->getParent(), Plan); 8098 8099 // Determine if the pointer operand of the access is either consecutive or 8100 // reverse consecutive. 8101 LoopVectorizationCostModel::InstWidening Decision = 8102 CM.getWideningDecision(I, Range.Start); 8103 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8104 bool Consecutive = 8105 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8106 8107 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8108 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8109 Consecutive, Reverse); 8110 8111 StoreInst *Store = cast<StoreInst>(I); 8112 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8113 Mask, Consecutive, Reverse); 8114 } 8115 8116 /// Creates a VPWidenIntOrFpInductionRecpipe for \p Phi. If needed, it will also 8117 /// insert a recipe to expand the step for the induction recipe. 
8118 static VPWidenIntOrFpInductionRecipe *createWidenInductionRecipes( 8119 PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, 8120 const InductionDescriptor &IndDesc, LoopVectorizationCostModel &CM, 8121 VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range) { 8122 // Returns true if an instruction \p I should be scalarized instead of 8123 // vectorized for the chosen vectorization factor. 8124 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8125 return CM.isScalarAfterVectorization(I, VF) || 8126 CM.isProfitableToScalarize(I, VF); 8127 }; 8128 8129 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8130 [&](ElementCount VF) { 8131 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8132 }, 8133 Range); 8134 assert(IndDesc.getStartValue() == 8135 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8136 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8137 "step must be loop invariant"); 8138 8139 VPValue *Step = 8140 vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep(), SE); 8141 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8142 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, TruncI, 8143 !NeedsScalarIVOnly); 8144 } 8145 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8146 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, 8147 !NeedsScalarIVOnly); 8148 } 8149 8150 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8151 PHINode *Phi, ArrayRef<VPValue *> Operands, VPlan &Plan, VFRange &Range) { 8152 8153 // Check if this is an integer or fp induction. If so, build the recipe that 8154 // produces its scalar and vector values. 8155 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8156 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, CM, Plan, 8157 *PSE.getSE(), *OrigLoop, Range); 8158 8159 // Check if this is pointer induction. If so, build the recipe for it. 8160 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8161 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8162 *PSE.getSE()); 8163 return nullptr; 8164 } 8165 8166 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8167 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, VPlan &Plan) { 8168 // Optimize the special case where the source is a constant integer 8169 // induction variable. Notice that we can only optimize the 'trunc' case 8170 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8171 // (c) other casts depend on pointer size. 8172 8173 // Determine whether \p K is a truncation based on an induction variable that 8174 // can be optimized. 
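  // For instance (illustrative): "%t = trunc i64 %iv to i32", where %iv is an
  // integer induction with a constant step, can be handled by generating the
  // induction directly in the narrower i32 type instead of widening the i64
  // induction and truncating every element.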
8175 auto isOptimizableIVTruncate = 8176 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8177 return [=](ElementCount VF) -> bool { 8178 return CM.isOptimizableIVTruncate(K, VF); 8179 }; 8180 }; 8181 8182 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8183 isOptimizableIVTruncate(I), Range)) { 8184 8185 auto *Phi = cast<PHINode>(I->getOperand(0)); 8186 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8187 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8188 return createWidenInductionRecipes(Phi, I, Start, II, CM, Plan, 8189 *PSE.getSE(), *OrigLoop, Range); 8190 } 8191 return nullptr; 8192 } 8193 8194 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8195 ArrayRef<VPValue *> Operands, 8196 VPlanPtr &Plan) { 8197 // If all incoming values are equal, the incoming VPValue can be used directly 8198 // instead of creating a new VPBlendRecipe. 8199 VPValue *FirstIncoming = Operands[0]; 8200 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8201 return FirstIncoming == Inc; 8202 })) { 8203 return Operands[0]; 8204 } 8205 8206 unsigned NumIncoming = Phi->getNumIncomingValues(); 8207 // For in-loop reductions, we do not need to create an additional select. 8208 VPValue *InLoopVal = nullptr; 8209 for (unsigned In = 0; In < NumIncoming; In++) { 8210 PHINode *PhiOp = 8211 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8212 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8213 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8214 InLoopVal = Operands[In]; 8215 } 8216 } 8217 8218 assert((!InLoopVal || NumIncoming == 2) && 8219 "Found an in-loop reduction for PHI with unexpected number of " 8220 "incoming values"); 8221 if (InLoopVal) 8222 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8223 8224 // We know that all PHIs in non-header blocks are converted into selects, so 8225 // we don't have to worry about the insertion order and we can just use the 8226 // builder. At this point we generate the predication tree. There may be 8227 // duplications since this is a simple recursive scan, but future 8228 // optimizations will clean it up. 
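  // The resulting blend recipe's operands interleave each incoming value with
  // the mask of its incoming edge: [V0, M0, V1, M1, ...]; the mask is omitted
  // only when the edge is unmasked, which the assert below restricts to the
  // single-predecessor case.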
8229 SmallVector<VPValue *, 2> OperandsWithMask; 8230 8231 for (unsigned In = 0; In < NumIncoming; In++) { 8232 VPValue *EdgeMask = 8233 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8234 assert((EdgeMask || NumIncoming == 1) && 8235 "Multiple predecessors with one having a full mask"); 8236 OperandsWithMask.push_back(Operands[In]); 8237 if (EdgeMask) 8238 OperandsWithMask.push_back(EdgeMask); 8239 } 8240 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8241 } 8242 8243 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8244 ArrayRef<VPValue *> Operands, 8245 VFRange &Range) const { 8246 8247 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8248 [this, CI](ElementCount VF) { 8249 return CM.isScalarWithPredication(CI, VF); 8250 }, 8251 Range); 8252 8253 if (IsPredicated) 8254 return nullptr; 8255 8256 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8257 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8258 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8259 ID == Intrinsic::pseudoprobe || 8260 ID == Intrinsic::experimental_noalias_scope_decl)) 8261 return nullptr; 8262 8263 auto willWiden = [&](ElementCount VF) -> bool { 8264 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8265 // The following case may be scalarized depending on the VF. 8266 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8267 // version of the instruction. 8268 // Is it beneficial to perform intrinsic call compared to lib call? 8269 bool NeedToScalarize = false; 8270 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8271 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8272 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8273 return UseVectorIntrinsic || !NeedToScalarize; 8274 }; 8275 8276 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8277 return nullptr; 8278 8279 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8280 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8281 } 8282 8283 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8284 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8285 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8286 // Instruction should be widened, unless it is scalar after vectorization, 8287 // scalarization is profitable or it is predicated. 
8288 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8289 return CM.isScalarAfterVectorization(I, VF) || 8290 CM.isProfitableToScalarize(I, VF) || 8291 CM.isScalarWithPredication(I, VF); 8292 }; 8293 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8294 Range); 8295 } 8296 8297 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8298 ArrayRef<VPValue *> Operands) const { 8299 auto IsVectorizableOpcode = [](unsigned Opcode) { 8300 switch (Opcode) { 8301 case Instruction::Add: 8302 case Instruction::And: 8303 case Instruction::AShr: 8304 case Instruction::BitCast: 8305 case Instruction::FAdd: 8306 case Instruction::FCmp: 8307 case Instruction::FDiv: 8308 case Instruction::FMul: 8309 case Instruction::FNeg: 8310 case Instruction::FPExt: 8311 case Instruction::FPToSI: 8312 case Instruction::FPToUI: 8313 case Instruction::FPTrunc: 8314 case Instruction::FRem: 8315 case Instruction::FSub: 8316 case Instruction::ICmp: 8317 case Instruction::IntToPtr: 8318 case Instruction::LShr: 8319 case Instruction::Mul: 8320 case Instruction::Or: 8321 case Instruction::PtrToInt: 8322 case Instruction::SDiv: 8323 case Instruction::Select: 8324 case Instruction::SExt: 8325 case Instruction::Shl: 8326 case Instruction::SIToFP: 8327 case Instruction::SRem: 8328 case Instruction::Sub: 8329 case Instruction::Trunc: 8330 case Instruction::UDiv: 8331 case Instruction::UIToFP: 8332 case Instruction::URem: 8333 case Instruction::Xor: 8334 case Instruction::ZExt: 8335 case Instruction::Freeze: 8336 return true; 8337 } 8338 return false; 8339 }; 8340 8341 if (!IsVectorizableOpcode(I->getOpcode())) 8342 return nullptr; 8343 8344 // Success: widen this instruction. 8345 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8346 } 8347 8348 void VPRecipeBuilder::fixHeaderPhis() { 8349 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8350 for (VPHeaderPHIRecipe *R : PhisToFix) { 8351 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8352 VPRecipeBase *IncR = 8353 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8354 R->addOperand(IncR->getVPSingleValue()); 8355 } 8356 } 8357 8358 VPBasicBlock *VPRecipeBuilder::handleReplication( 8359 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8360 VPlanPtr &Plan) { 8361 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8362 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8363 Range); 8364 8365 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8366 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8367 Range); 8368 8369 // Even if the instruction is not marked as uniform, there are certain 8370 // intrinsic calls that can be effectively treated as such, so we check for 8371 // them here. Conservatively, we only do this for scalable vectors, since 8372 // for fixed-width VFs we can always fall back on full scalarization. 8373 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8374 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8375 case Intrinsic::assume: 8376 case Intrinsic::lifetime_start: 8377 case Intrinsic::lifetime_end: 8378 // For scalable vectors if one of the operands is variant then we still 8379 // want to mark as uniform, which will generate one instruction for just 8380 // the first lane of the vector. We can't scalarize the call in the same 8381 // way as for fixed-width vectors because we don't know how many lanes 8382 // there are. 
8383 //
8384 // The reasons for doing it this way for scalable vectors are:
8385 // 1. For the assume intrinsic generating the instruction for the first
8386 // lane is still better than not generating any at all. For
8387 // example, the input may be a splat across all lanes.
8388 // 2. For the lifetime start/end intrinsics the pointer operand only
8389 // does anything useful when the input comes from a stack object,
8390 // which suggests it should always be uniform. For non-stack objects
8391 // the effect is to poison the object, which still allows us to
8392 // remove the call.
8393 IsUniform = true;
8394 break;
8395 default:
8396 break;
8397 }
8398 }
8399
8400 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8401 IsUniform, IsPredicated);
8402
8403 // Find if I uses a predicated instruction. If so, it will use its scalar
8404 // value. Avoid hoisting the insert-element which packs the scalar value into
8405 // a vector value, as that happens iff all users use the vector value.
8406 for (VPValue *Op : Recipe->operands()) {
8407 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8408 if (!PredR)
8409 continue;
8410 auto *RepR =
8411 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8412 assert(RepR->isPredicated() &&
8413 "expected Replicate recipe to be predicated");
8414 RepR->setAlsoPack(false);
8415 }
8416
8417 // Finalize the recipe for Instr, first if it is not predicated.
8418 if (!IsPredicated) {
8419 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8420 setRecipe(I, Recipe);
8421 Plan->addVPValue(I, Recipe);
8422 VPBB->appendRecipe(Recipe);
8423 return VPBB;
8424 }
8425 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8426
8427 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8428 assert(SingleSucc && "VPBB must have a single successor when handling "
8429 "predicated replication.");
8430 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8431 // Record predicated instructions for above packing optimizations.
8432 VPBlockBase *Region = createReplicateRegion(Recipe, Plan);
8433 VPBlockUtils::insertBlockAfter(Region, VPBB);
8434 auto *RegSucc = new VPBasicBlock();
8435 VPBlockUtils::insertBlockAfter(RegSucc, Region);
8436 VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8437 return RegSucc;
8438 }
8439
8440 VPRegionBlock *
8441 VPRecipeBuilder::createReplicateRegion(VPReplicateRecipe *PredRecipe,
8442 VPlanPtr &Plan) {
8443 Instruction *Instr = PredRecipe->getUnderlyingInstr();
8444 // Instructions marked for predication are replicated and placed under an
8445 // if-then construct to prevent side-effects.
8446 // Generate recipes to compute the block mask for this region.
8447 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8448
8449 // Build the triangular if-then region.
8450 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8451 assert(Instr->getParent() && "Predicated instruction not in any basic block");
8452 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8453 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8454 auto *PHIRecipe = Instr->getType()->isVoidTy()
8455 ?
nullptr 8456 : new VPPredInstPHIRecipe(PredRecipe); 8457 if (PHIRecipe) { 8458 setRecipe(Instr, PHIRecipe); 8459 Plan->addVPValue(Instr, PHIRecipe); 8460 } else { 8461 setRecipe(Instr, PredRecipe); 8462 Plan->addVPValue(Instr, PredRecipe); 8463 } 8464 8465 auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8466 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8467 VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true); 8468 8469 // Note: first set Entry as region entry and then connect successors starting 8470 // from it in order, to propagate the "parent" of each VPBasicBlock. 8471 VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry); 8472 VPBlockUtils::connectBlocks(Pred, Exiting); 8473 8474 return Region; 8475 } 8476 8477 VPRecipeOrVPValueTy 8478 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8479 ArrayRef<VPValue *> Operands, 8480 VFRange &Range, VPlanPtr &Plan) { 8481 // First, check for specific widening recipes that deal with inductions, Phi 8482 // nodes, calls and memory operations. 8483 VPRecipeBase *Recipe; 8484 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8485 if (Phi->getParent() != OrigLoop->getHeader()) 8486 return tryToBlend(Phi, Operands, Plan); 8487 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range))) 8488 return toVPRecipeResult(Recipe); 8489 8490 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8491 assert((Legal->isReductionVariable(Phi) || 8492 Legal->isFirstOrderRecurrence(Phi)) && 8493 "can only widen reductions and first-order recurrences here"); 8494 VPValue *StartV = Operands[0]; 8495 if (Legal->isReductionVariable(Phi)) { 8496 const RecurrenceDescriptor &RdxDesc = 8497 Legal->getReductionVars().find(Phi)->second; 8498 assert(RdxDesc.getRecurrenceStartValue() == 8499 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8500 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8501 CM.isInLoopReduction(Phi), 8502 CM.useOrderedReductions(RdxDesc)); 8503 } else { 8504 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8505 } 8506 8507 // Record the incoming value from the backedge, so we can add the incoming 8508 // value from the backedge after all recipes have been created. 8509 recordRecipeOf(cast<Instruction>( 8510 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8511 PhisToFix.push_back(PhiRecipe); 8512 return toVPRecipeResult(PhiRecipe); 8513 } 8514 8515 if (isa<TruncInst>(Instr) && 8516 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8517 Range, *Plan))) 8518 return toVPRecipeResult(Recipe); 8519 8520 // All widen recipes below deal only with VF > 1. 
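// Roughly, getDecisionAndClampRange below shrinks Range so that every VF left
// in it answers the VF.isScalar() query the same way; a range starting at
// VF=1 therefore collapses to the scalar VF alone and none of the widening
// recipes below are created for that plan.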
8521 if (LoopVectorizationPlanner::getDecisionAndClampRange(
8522 [&](ElementCount VF) { return VF.isScalar(); }, Range))
8523 return nullptr;
8524
8525 if (auto *CI = dyn_cast<CallInst>(Instr))
8526 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8527
8528 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8529 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8530
8531 if (!shouldWiden(Instr, Range))
8532 return nullptr;
8533
8534 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8535 return toVPRecipeResult(new VPWidenGEPRecipe(
8536 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8537
8538 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8539 bool InvariantCond =
8540 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8541 return toVPRecipeResult(new VPWidenSelectRecipe(
8542 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8543 }
8544
8545 return toVPRecipeResult(tryToWiden(Instr, Operands));
8546 }
8547
8548 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8549 ElementCount MaxVF) {
8550 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8551
8552 // Add assume instructions we need to drop to DeadInstructions, to prevent
8553 // them from being added to the VPlan.
8554 // TODO: We only need to drop assumes in blocks that get flattened. If the
8555 // control flow is preserved, we should keep them.
8556 SmallPtrSet<Instruction *, 4> DeadInstructions;
8557 auto &ConditionalAssumes = Legal->getConditionalAssumes();
8558 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8559
8560 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8561 // Dead instructions do not need sinking. Remove them from SinkAfter.
8562 for (Instruction *I : DeadInstructions)
8563 SinkAfter.erase(I);
8564
8565 // Cannot sink instructions after dead instructions (there won't be any
8566 // recipes for them). Instead, find the first non-dead previous instruction.
8567 for (auto &P : Legal->getSinkAfter()) {
8568 Instruction *SinkTarget = P.second;
8569 Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8570 (void)FirstInst;
8571 while (DeadInstructions.contains(SinkTarget)) {
8572 assert(
8573 SinkTarget != FirstInst &&
8574 "Must find a live instruction (at least the one feeding the "
8575 "first-order recurrence PHI) before reaching beginning of the block");
8576 SinkTarget = SinkTarget->getPrevNode();
8577 assert(SinkTarget != P.first &&
8578 "sink source equals target, no sinking required");
8579 }
8580 P.second = SinkTarget;
8581 }
8582
8583 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8584 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8585 VFRange SubRange = {VF, MaxVFPlusOne};
8586 VPlans.push_back(
8587 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8588 VF = SubRange.End;
8589 }
8590 }
8591
8592 // Add the necessary canonical IV and branch recipes required to control the
8593 // loop.
8594 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8595 bool HasNUW,
8596 bool UseLaneMaskForLoopControlFlow) {
8597 Value *StartIdx = ConstantInt::get(IdxTy, 0);
8598 auto *StartV = Plan.getOrAddVPValue(StartIdx);
8599
8600 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header.
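// Rough shape of the control recipes added by this function (simplified
// sketch, actual VPlan printing differs):
//   vector.body:
//     EMIT %index = CANONICAL-INDUCTION %start
//     ...
//   latch:
//     EMIT %index.next = %index + VF * UF
//     EMIT branch-on-count %index.next, %vector.trip.count
// When UseLaneMaskForLoopControlFlow is set, the exit branch is instead a
// branch-on-cond of the negated active-lane-mask computed below.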
8601 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8602 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8603 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8604 Header->insert(CanonicalIVPHI, Header->begin()); 8605 8606 // Add a CanonicalIVIncrement{NUW} VPInstruction to increment the scalar 8607 // IV by VF * UF. 8608 auto *CanonicalIVIncrement = 8609 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW 8610 : VPInstruction::CanonicalIVIncrement, 8611 {CanonicalIVPHI}, DL, "index.next"); 8612 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8613 8614 VPBasicBlock *EB = TopRegion->getExitingBasicBlock(); 8615 EB->appendRecipe(CanonicalIVIncrement); 8616 8617 if (UseLaneMaskForLoopControlFlow) { 8618 // Create the active lane mask instruction in the vplan preheader. 8619 VPBasicBlock *Preheader = Plan.getEntry()->getEntryBasicBlock(); 8620 8621 // We can't use StartV directly in the ActiveLaneMask VPInstruction, since 8622 // we have to take unrolling into account. Each part needs to start at 8623 // Part * VF 8624 auto *CanonicalIVIncrementParts = 8625 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementForPartNUW 8626 : VPInstruction::CanonicalIVIncrementForPart, 8627 {StartV}, DL, "index.part.next"); 8628 Preheader->appendRecipe(CanonicalIVIncrementParts); 8629 8630 // Create the ActiveLaneMask instruction using the correct start values. 8631 VPValue *TC = Plan.getOrCreateTripCount(); 8632 auto *EntryALM = new VPInstruction(VPInstruction::ActiveLaneMask, 8633 {CanonicalIVIncrementParts, TC}, DL, 8634 "active.lane.mask.entry"); 8635 Preheader->appendRecipe(EntryALM); 8636 8637 // Now create the ActiveLaneMaskPhi recipe in the main loop using the 8638 // preheader ActiveLaneMask instruction. 8639 auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc()); 8640 Header->insert(LaneMaskPhi, Header->getFirstNonPhi()); 8641 8642 // Create the active lane mask for the next iteration of the loop. 8643 CanonicalIVIncrementParts = 8644 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementForPartNUW 8645 : VPInstruction::CanonicalIVIncrementForPart, 8646 {CanonicalIVIncrement}, DL); 8647 EB->appendRecipe(CanonicalIVIncrementParts); 8648 8649 auto *ALM = new VPInstruction(VPInstruction::ActiveLaneMask, 8650 {CanonicalIVIncrementParts, TC}, DL, 8651 "active.lane.mask.next"); 8652 EB->appendRecipe(ALM); 8653 LaneMaskPhi->addOperand(ALM); 8654 8655 // We have to invert the mask here because a true condition means jumping 8656 // to the exit block. 8657 auto *NotMask = new VPInstruction(VPInstruction::Not, ALM, DL); 8658 EB->appendRecipe(NotMask); 8659 8660 VPInstruction *BranchBack = 8661 new VPInstruction(VPInstruction::BranchOnCond, {NotMask}, DL); 8662 EB->appendRecipe(BranchBack); 8663 } else { 8664 // Add the BranchOnCount VPInstruction to the latch. 8665 VPInstruction *BranchBack = new VPInstruction( 8666 VPInstruction::BranchOnCount, 8667 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8668 EB->appendRecipe(BranchBack); 8669 } 8670 } 8671 8672 // Add exit values to \p Plan. VPLiveOuts are added for each LCSSA phi in the 8673 // original exit block. 8674 static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB, 8675 VPBasicBlock *MiddleVPBB, Loop *OrigLoop, 8676 VPlan &Plan) { 8677 BasicBlock *ExitBB = OrigLoop->getUniqueExitBlock(); 8678 BasicBlock *ExitingBB = OrigLoop->getExitingBlock(); 8679 // Only handle single-exit loops with unique exit blocks for now. 
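// Example (sketch): for an original exit block such as
//   exit:
//     %res.lcssa = phi i32 [ %res, %loop ]
// the loop below records a VPLiveOut tying %res.lcssa to the VPValue that
// models %res, so the exit phi can be wired up once the vector loop exists.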
8680 if (!ExitBB || !ExitBB->getSinglePredecessor() || !ExitingBB) 8681 return; 8682 8683 // Introduce VPUsers modeling the exit values. 8684 for (PHINode &ExitPhi : ExitBB->phis()) { 8685 Value *IncomingValue = 8686 ExitPhi.getIncomingValueForBlock(ExitingBB); 8687 VPValue *V = Plan.getOrAddVPValue(IncomingValue, true); 8688 Plan.addLiveOut(&ExitPhi, V); 8689 } 8690 } 8691 8692 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8693 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8694 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8695 8696 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8697 8698 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8699 8700 // --------------------------------------------------------------------------- 8701 // Pre-construction: record ingredients whose recipes we'll need to further 8702 // process after constructing the initial VPlan. 8703 // --------------------------------------------------------------------------- 8704 8705 // Mark instructions we'll need to sink later and their targets as 8706 // ingredients whose recipe we'll need to record. 8707 for (auto &Entry : SinkAfter) { 8708 RecipeBuilder.recordRecipeOf(Entry.first); 8709 RecipeBuilder.recordRecipeOf(Entry.second); 8710 } 8711 for (auto &Reduction : CM.getInLoopReductionChains()) { 8712 PHINode *Phi = Reduction.first; 8713 RecurKind Kind = 8714 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8715 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8716 8717 RecipeBuilder.recordRecipeOf(Phi); 8718 for (auto &R : ReductionOperations) { 8719 RecipeBuilder.recordRecipeOf(R); 8720 // For min/max reductions, where we have a pair of icmp/select, we also 8721 // need to record the ICmp recipe, so it can be removed later. 8722 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8723 "Only min/max recurrences allowed for inloop reductions"); 8724 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8725 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8726 } 8727 } 8728 8729 // For each interleave group which is relevant for this (possibly trimmed) 8730 // Range, add it to the set of groups to be later applied to the VPlan and add 8731 // placeholders for its members' Recipes which we'll be replacing with a 8732 // single VPInterleaveRecipe. 8733 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8734 auto applyIG = [IG, this](ElementCount VF) -> bool { 8735 return (VF.isVector() && // Query is illegal for VF == 1 8736 CM.getWideningDecision(IG->getInsertPos(), VF) == 8737 LoopVectorizationCostModel::CM_Interleave); 8738 }; 8739 if (!getDecisionAndClampRange(applyIG, Range)) 8740 continue; 8741 InterleaveGroups.insert(IG); 8742 for (unsigned i = 0; i < IG->getFactor(); i++) 8743 if (Instruction *Member = IG->getMember(i)) 8744 RecipeBuilder.recordRecipeOf(Member); 8745 }; 8746 8747 // --------------------------------------------------------------------------- 8748 // Build initial VPlan: Scan the body of the loop in a topological order to 8749 // visit each basic block after having visited its predecessor basic blocks. 8750 // --------------------------------------------------------------------------- 8751 8752 // Create initial VPlan skeleton, starting with a block for the pre-header, 8753 // followed by a region for the vector loop, followed by the middle block. 
The 8754 // skeleton vector loop region contains a header and latch block. 8755 VPBasicBlock *Preheader = new VPBasicBlock("vector.ph"); 8756 auto Plan = std::make_unique<VPlan>(Preheader); 8757 8758 VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body"); 8759 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch"); 8760 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB); 8761 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop"); 8762 VPBlockUtils::insertBlockAfter(TopRegion, Preheader); 8763 VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block"); 8764 VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion); 8765 8766 Instruction *DLInst = 8767 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 8768 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), 8769 DLInst ? DLInst->getDebugLoc() : DebugLoc(), 8770 !CM.foldTailByMasking(), 8771 CM.useActiveLaneMaskForControlFlow()); 8772 8773 // Scan the body of the loop in a topological order to visit each basic block 8774 // after having visited its predecessor basic blocks. 8775 LoopBlocksDFS DFS(OrigLoop); 8776 DFS.perform(LI); 8777 8778 VPBasicBlock *VPBB = HeaderVPBB; 8779 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 8780 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8781 // Relevant instructions from basic block BB will be grouped into VPRecipe 8782 // ingredients and fill a new VPBasicBlock. 8783 unsigned VPBBsForBB = 0; 8784 if (VPBB != HeaderVPBB) 8785 VPBB->setName(BB->getName()); 8786 Builder.setInsertPoint(VPBB); 8787 8788 // Introduce each ingredient into VPlan. 8789 // TODO: Model and preserve debug intrinsics in VPlan. 8790 for (Instruction &I : BB->instructionsWithoutDebug()) { 8791 Instruction *Instr = &I; 8792 8793 // First filter out irrelevant instructions, to ensure no recipes are 8794 // built for them. 8795 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8796 continue; 8797 8798 SmallVector<VPValue *, 4> Operands; 8799 auto *Phi = dyn_cast<PHINode>(Instr); 8800 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8801 Operands.push_back(Plan->getOrAddVPValue( 8802 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8803 } else { 8804 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8805 Operands = {OpRange.begin(), OpRange.end()}; 8806 } 8807 8808 // Invariant stores inside loop will be deleted and a single store 8809 // with the final reduction value will be added to the exit block 8810 StoreInst *SI; 8811 if ((SI = dyn_cast<StoreInst>(&I)) && 8812 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) 8813 continue; 8814 8815 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8816 Instr, Operands, Range, Plan)) { 8817 // If Instr can be simplified to an existing VPValue, use it. 8818 if (RecipeOrValue.is<VPValue *>()) { 8819 auto *VPV = RecipeOrValue.get<VPValue *>(); 8820 Plan->addVPValue(Instr, VPV); 8821 // If the re-used value is a recipe, register the recipe for the 8822 // instruction, in case the recipe for Instr needs to be recorded. 8823 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8824 RecipeBuilder.setRecipe(Instr, R); 8825 continue; 8826 } 8827 // Otherwise, add the new recipe. 
8828 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8829 for (auto *Def : Recipe->definedValues()) { 8830 auto *UV = Def->getUnderlyingValue(); 8831 Plan->addVPValue(UV, Def); 8832 } 8833 8834 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8835 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8836 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8837 // of the header block. That can happen for truncates of induction 8838 // variables. Those recipes are moved to the phi section of the header 8839 // block after applying SinkAfter, which relies on the original 8840 // position of the trunc. 8841 assert(isa<TruncInst>(Instr)); 8842 InductionsToMove.push_back( 8843 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8844 } 8845 RecipeBuilder.setRecipe(Instr, Recipe); 8846 VPBB->appendRecipe(Recipe); 8847 continue; 8848 } 8849 8850 // Otherwise, if all widening options failed, Instruction is to be 8851 // replicated. This may create a successor for VPBB. 8852 VPBasicBlock *NextVPBB = 8853 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8854 if (NextVPBB != VPBB) { 8855 VPBB = NextVPBB; 8856 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8857 : ""); 8858 } 8859 } 8860 8861 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8862 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8863 } 8864 8865 HeaderVPBB->setName("vector.body"); 8866 8867 // Fold the last, empty block into its predecessor. 8868 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8869 assert(VPBB && "expected to fold last (empty) block"); 8870 // After here, VPBB should not be used. 8871 VPBB = nullptr; 8872 8873 addUsersInExitBlock(HeaderVPBB, MiddleVPBB, OrigLoop, *Plan); 8874 8875 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8876 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8877 "entry block must be set to a VPRegionBlock having a non-empty entry " 8878 "VPBasicBlock"); 8879 RecipeBuilder.fixHeaderPhis(); 8880 8881 // --------------------------------------------------------------------------- 8882 // Transform initial VPlan: Apply previously taken decisions, in order, to 8883 // bring the VPlan to its final state. 8884 // --------------------------------------------------------------------------- 8885 8886 // Apply Sink-After legal constraints. 8887 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8888 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8889 if (Region && Region->isReplicator()) { 8890 assert(Region->getNumSuccessors() == 1 && 8891 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8892 assert(R->getParent()->size() == 1 && 8893 "A recipe in an original replicator region must be the only " 8894 "recipe in its block"); 8895 return Region; 8896 } 8897 return nullptr; 8898 }; 8899 for (auto &Entry : SinkAfter) { 8900 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8901 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8902 8903 auto *TargetRegion = GetReplicateRegion(Target); 8904 auto *SinkRegion = GetReplicateRegion(Sink); 8905 if (!SinkRegion) { 8906 // If the sink source is not a replicate region, sink the recipe directly. 8907 if (TargetRegion) { 8908 // The target is in a replication region, make sure to move Sink to 8909 // the block after it, not into the replication region itself. 
8910 VPBasicBlock *NextBlock = 8911 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8912 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8913 } else 8914 Sink->moveAfter(Target); 8915 continue; 8916 } 8917 8918 // The sink source is in a replicate region. Unhook the region from the CFG. 8919 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8920 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8921 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8922 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8923 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8924 8925 if (TargetRegion) { 8926 // The target recipe is also in a replicate region, move the sink region 8927 // after the target region. 8928 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8929 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8930 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8931 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8932 } else { 8933 // The sink source is in a replicate region, we need to move the whole 8934 // replicate region, which should only contain a single recipe in the 8935 // main block. 8936 auto *SplitBlock = 8937 Target->getParent()->splitAt(std::next(Target->getIterator())); 8938 8939 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8940 8941 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8942 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8943 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8944 } 8945 } 8946 8947 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8948 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8949 8950 // Now that sink-after is done, move induction recipes for optimized truncates 8951 // to the phi section of the header block. 8952 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8953 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8954 8955 // Adjust the recipes for any inloop reductions. 8956 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExiting()), Plan, 8957 RecipeBuilder, Range.Start); 8958 8959 // Introduce a recipe to combine the incoming and previous values of a 8960 // first-order recurrence. 8961 for (VPRecipeBase &R : 8962 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8963 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8964 if (!RecurPhi) 8965 continue; 8966 8967 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8968 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8969 auto *Region = GetReplicateRegion(PrevRecipe); 8970 if (Region) 8971 InsertBlock = dyn_cast<VPBasicBlock>(Region->getSingleSuccessor()); 8972 if (!InsertBlock) { 8973 InsertBlock = new VPBasicBlock(Region->getName() + ".succ"); 8974 VPBlockUtils::insertBlockAfter(InsertBlock, Region); 8975 } 8976 if (Region || PrevRecipe->isPhi()) 8977 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8978 else 8979 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8980 8981 auto *RecurSplice = cast<VPInstruction>( 8982 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8983 {RecurPhi, RecurPhi->getBackedgeValue()})); 8984 8985 RecurPhi->replaceAllUsesWith(RecurSplice); 8986 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8987 // all users. 
8988 RecurSplice->setOperand(0, RecurPhi); 8989 } 8990 8991 // Interleave memory: for each Interleave Group we marked earlier as relevant 8992 // for this VPlan, replace the Recipes widening its memory instructions with a 8993 // single VPInterleaveRecipe at its insertion point. 8994 for (auto IG : InterleaveGroups) { 8995 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8996 RecipeBuilder.getRecipe(IG->getInsertPos())); 8997 SmallVector<VPValue *, 4> StoredValues; 8998 for (unsigned i = 0; i < IG->getFactor(); ++i) 8999 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9000 auto *StoreR = 9001 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9002 StoredValues.push_back(StoreR->getStoredValue()); 9003 } 9004 9005 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9006 Recipe->getMask()); 9007 VPIG->insertBefore(Recipe); 9008 unsigned J = 0; 9009 for (unsigned i = 0; i < IG->getFactor(); ++i) 9010 if (Instruction *Member = IG->getMember(i)) { 9011 if (!Member->getType()->isVoidTy()) { 9012 VPValue *OriginalV = Plan->getVPValue(Member); 9013 Plan->removeVPValueFor(Member); 9014 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9015 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9016 J++; 9017 } 9018 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9019 } 9020 } 9021 9022 std::string PlanName; 9023 raw_string_ostream RSO(PlanName); 9024 ElementCount VF = Range.Start; 9025 Plan->addVF(VF); 9026 RSO << "Initial VPlan for VF={" << VF; 9027 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9028 Plan->addVF(VF); 9029 RSO << "," << VF; 9030 } 9031 RSO << "},UF>=1"; 9032 RSO.flush(); 9033 Plan->setName(PlanName); 9034 9035 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9036 // in ways that accessing values using original IR values is incorrect. 9037 Plan->disableValue2VPValue(); 9038 9039 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9040 VPlanTransforms::sinkScalarOperands(*Plan); 9041 VPlanTransforms::removeDeadRecipes(*Plan); 9042 VPlanTransforms::mergeReplicateRegions(*Plan); 9043 VPlanTransforms::removeRedundantExpandSCEVRecipes(*Plan); 9044 9045 // Fold Exit block into its predecessor if possible. 9046 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9047 // VPBasicBlock as exit. 9048 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExiting()); 9049 9050 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9051 return Plan; 9052 } 9053 9054 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9055 // Outer loop handling: They may require CFG and instruction level 9056 // transformations before even evaluating whether vectorization is profitable. 9057 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9058 // the vectorization pipeline. 
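// Roughly, this path proceeds as follows (sketch): the hierarchical CFG
// builder below wraps each original basic block in a VPBasicBlock of plain
// VPInstructions, VPInstructionsToVPRecipes then rewrites those into widening
// recipes, and finally the canonical IV and branch recipes are appended.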
9059 assert(!OrigLoop->isInnermost());
9060 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9061
9062 // Create new empty VPlan
9063 auto Plan = std::make_unique<VPlan>();
9064
9065 // Build hierarchical CFG
9066 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9067 HCFGBuilder.buildHierarchicalCFG();
9068
9069 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9070 VF *= 2)
9071 Plan->addVF(VF);
9072
9073 SmallPtrSet<Instruction *, 1> DeadInstructions;
9074 VPlanTransforms::VPInstructionsToVPRecipes(
9075 OrigLoop, Plan,
9076 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9077 DeadInstructions, *PSE.getSE());
9078
9079 // Remove the existing terminator of the exiting block of the top-most region.
9080 // A BranchOnCount will be added instead when adding the canonical IV recipes.
9081 auto *Term =
9082 Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
9083 Term->eraseFromParent();
9084
9085 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9086 true, CM.useActiveLaneMaskForControlFlow());
9087 return Plan;
9088 }
9089
9090 // Adjust the recipes for reductions. For in-loop reductions the chain of
9091 // instructions leading from the loop exit instr to the phi needs to be converted
9092 // to reductions, with one operand being vector and the other being the scalar
9093 // reduction chain. For other reductions, a select is introduced between the phi
9094 // and live-out recipes when folding the tail.
9095 void LoopVectorizationPlanner::adjustRecipesForReductions(
9096 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9097 ElementCount MinVF) {
9098 for (auto &Reduction : CM.getInLoopReductionChains()) {
9099 PHINode *Phi = Reduction.first;
9100 const RecurrenceDescriptor &RdxDesc =
9101 Legal->getReductionVars().find(Phi)->second;
9102 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9103
9104 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9105 continue;
9106
9107 // ReductionOperations are ordered top-down from the phi's use to the
9108 // LoopExitValue. We keep track of the previous item (the Chain) to tell
9109 // which of the two operands will remain scalar and which will be reduced.
9110 // For minmax the chain will be the select instructions.
9111 Instruction *Chain = Phi;
9112 for (Instruction *R : ReductionOperations) {
9113 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9114 RecurKind Kind = RdxDesc.getRecurrenceKind();
9115
9116 VPValue *ChainOp = Plan->getVPValue(Chain);
9117 unsigned FirstOpId;
9118 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9119 "Only min/max recurrences allowed for inloop reductions");
9120 // Recognize a call to the llvm.fmuladd intrinsic.
9121 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9122 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9123 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9124 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9125 assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9126 "Expected to replace a VPWidenSelectSC");
9127 FirstOpId = 1;
9128 } else {
9129 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9130 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9131 "Expected to replace a VPWidenSC");
9132 FirstOpId = 0;
9133 }
9134 unsigned VecOpId =
9135 R->getOperand(FirstOpId) == Chain ?
FirstOpId + 1 : FirstOpId;
9136 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9137
9138 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9139 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9140 : nullptr;
9141
9142 if (IsFMulAdd) {
9143 // If the instruction is a call to the llvm.fmuladd intrinsic then we
9144 // need to create an fmul recipe to use as the vector operand for the
9145 // fadd reduction.
9146 VPInstruction *FMulRecipe = new VPInstruction(
9147 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9148 FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9149 WidenRecipe->getParent()->insert(FMulRecipe,
9150 WidenRecipe->getIterator());
9151 VecOp = FMulRecipe;
9152 }
9153 VPReductionRecipe *RedRecipe =
9154 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9155 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9156 Plan->removeVPValueFor(R);
9157 Plan->addVPValue(R, RedRecipe);
9158 // Append the recipe to the end of the VPBasicBlock because we need to
9159 // ensure that it comes after all of its inputs, including CondOp.
9160 WidenRecipe->getParent()->appendRecipe(RedRecipe);
9161 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9162 WidenRecipe->eraseFromParent();
9163
9164 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9165 VPRecipeBase *CompareRecipe =
9166 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9167 assert(isa<VPWidenRecipe>(CompareRecipe) &&
9168 "Expected to replace a VPWidenSC");
9169 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9170 "Expected no remaining users");
9171 CompareRecipe->eraseFromParent();
9172 }
9173 Chain = R;
9174 }
9175 }
9176
9177 // If tail is folded by masking, introduce selects between the phi
9178 // and the live-out instruction of each reduction, at the beginning of the
9179 // dedicated latch block.
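// Illustrative example of the selects introduced below: with a header
// block-in mask %mask, a reduction phi %rdx.phi and its live-out value
// %rdx.next, the latch gains roughly
//   %sel = select %mask, %rdx.next, %rdx.phi
// so that lanes masked off in the final iteration keep the previous value.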
9180 if (CM.foldTailByMasking()) { 9181 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9182 for (VPRecipeBase &R : 9183 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9184 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9185 if (!PhiR || PhiR->isInLoop()) 9186 continue; 9187 VPValue *Cond = 9188 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9189 VPValue *Red = PhiR->getBackedgeValue(); 9190 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9191 "reduction recipe must be defined before latch"); 9192 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9193 } 9194 } 9195 } 9196 9197 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9198 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9199 VPSlotTracker &SlotTracker) const { 9200 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9201 IG->getInsertPos()->printAsOperand(O, false); 9202 O << ", "; 9203 getAddr()->printAsOperand(O, SlotTracker); 9204 VPValue *Mask = getMask(); 9205 if (Mask) { 9206 O << ", "; 9207 Mask->printAsOperand(O, SlotTracker); 9208 } 9209 9210 unsigned OpIdx = 0; 9211 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9212 if (!IG->getMember(i)) 9213 continue; 9214 if (getNumStoreOperands() > 0) { 9215 O << "\n" << Indent << " store "; 9216 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9217 O << " to index " << i; 9218 } else { 9219 O << "\n" << Indent << " "; 9220 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9221 O << " = load from index " << i; 9222 } 9223 ++OpIdx; 9224 } 9225 } 9226 #endif 9227 9228 void VPWidenCallRecipe::execute(VPTransformState &State) { 9229 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9230 *this, State); 9231 } 9232 9233 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9234 assert(!State.Instance && "Int or FP induction being replicated."); 9235 9236 Value *Start = getStartValue()->getLiveInIRValue(); 9237 const InductionDescriptor &ID = getInductionDescriptor(); 9238 TruncInst *Trunc = getTruncInst(); 9239 IRBuilderBase &Builder = State.Builder; 9240 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9241 assert(State.VF.isVector() && "must have vector VF"); 9242 9243 // The value from the original loop to which we are mapping the new induction 9244 // variable. 9245 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9246 9247 // Fast-math-flags propagate from the original induction instruction. 9248 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9249 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9250 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9251 9252 // Now do the actual transformations, and start with fetching the step value. 
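// Rough sketch of the IR produced below for, e.g., VF=4, UF=2 and an integer
// IV with step 1 (names simplified):
//   vector.ph:
//     ; stepped start = splat(start) + <0, 1, 2, 3> * step
//   vector.body:
//     %vec.ind      = phi [ stepped start, %vector.ph ], [ %vec.ind.next, ... ]
//     %step.add     = add %vec.ind, <4, 4, 4, 4>   ; value for part 1
//     %vec.ind.next = add %step.add, <4, 4, 4, 4>  ; feeds the phi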
9253 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9254 9255 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9256 "Expected either an induction phi-node or a truncate of it!"); 9257 9258 // Construct the initial value of the vector IV in the vector loop preheader 9259 auto CurrIP = Builder.saveIP(); 9260 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9261 Builder.SetInsertPoint(VectorPH->getTerminator()); 9262 if (isa<TruncInst>(EntryVal)) { 9263 assert(Start->getType()->isIntegerTy() && 9264 "Truncation requires an integer type"); 9265 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9266 Step = Builder.CreateTrunc(Step, TruncType); 9267 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9268 } 9269 9270 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9271 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9272 Value *SteppedStart = getStepVector( 9273 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9274 9275 // We create vector phi nodes for both integer and floating-point induction 9276 // variables. Here, we determine the kind of arithmetic we will perform. 9277 Instruction::BinaryOps AddOp; 9278 Instruction::BinaryOps MulOp; 9279 if (Step->getType()->isIntegerTy()) { 9280 AddOp = Instruction::Add; 9281 MulOp = Instruction::Mul; 9282 } else { 9283 AddOp = ID.getInductionOpcode(); 9284 MulOp = Instruction::FMul; 9285 } 9286 9287 // Multiply the vectorization factor by the step using integer or 9288 // floating-point arithmetic as appropriate. 9289 Type *StepType = Step->getType(); 9290 Value *RuntimeVF; 9291 if (Step->getType()->isFloatingPointTy()) 9292 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9293 else 9294 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9295 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9296 9297 // Create a vector splat to use in the induction update. 9298 // 9299 // FIXME: If the step is non-constant, we create the vector splat with 9300 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9301 // handle a constant vector splat. 9302 Value *SplatVF = isa<Constant>(Mul) 9303 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9304 : Builder.CreateVectorSplat(State.VF, Mul); 9305 Builder.restoreIP(CurrIP); 9306 9307 // We may need to add the step a number of times, depending on the unroll 9308 // factor. The last of those goes into the PHI. 9309 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9310 &*State.CFG.PrevBB->getFirstInsertionPt()); 9311 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9312 Instruction *LastInduction = VecInd; 9313 for (unsigned Part = 0; Part < State.UF; ++Part) { 9314 State.set(this, LastInduction, Part); 9315 9316 if (isa<TruncInst>(EntryVal)) 9317 State.addMetadata(LastInduction, EntryVal); 9318 9319 LastInduction = cast<Instruction>( 9320 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9321 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9322 } 9323 9324 LastInduction->setName("vec.ind.next"); 9325 VecInd->addIncoming(SteppedStart, VectorPH); 9326 // Add induction update using an incorrect block temporarily. The phi node 9327 // will be fixed after VPlan execution. Note that at this point the latch 9328 // block cannot be used, as it does not exist yet. 9329 // TODO: Model increment value in VPlan, by turning the recipe into a 9330 // multi-def and a subclass of VPHeaderPHIRecipe. 
9331 VecInd->addIncoming(LastInduction, VectorPH); 9332 } 9333 9334 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9335 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9336 "Not a pointer induction according to InductionDescriptor!"); 9337 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9338 "Unexpected type."); 9339 9340 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9341 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9342 9343 if (onlyScalarsGenerated(State.VF)) { 9344 // This is the normalized GEP that starts counting at zero. 9345 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9346 CanonicalIV, IndDesc.getStep()->getType()); 9347 // Determine the number of scalars we need to generate for each unroll 9348 // iteration. If the instruction is uniform, we only need to generate the 9349 // first lane. Otherwise, we generate all VF values. 9350 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9351 assert((IsUniform || !State.VF.isScalable()) && 9352 "Cannot scalarize a scalable VF"); 9353 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9354 9355 for (unsigned Part = 0; Part < State.UF; ++Part) { 9356 Value *PartStart = 9357 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9358 9359 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9360 Value *Idx = State.Builder.CreateAdd( 9361 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9362 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9363 9364 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9365 State.CFG.PrevBB->getTerminator()); 9366 Value *SclrGep = emitTransformedIndex( 9367 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9368 SclrGep->setName("next.gep"); 9369 State.set(this, SclrGep, VPIteration(Part, Lane)); 9370 } 9371 } 9372 return; 9373 } 9374 9375 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9376 "Induction step not a SCEV constant!"); 9377 Type *PhiType = IndDesc.getStep()->getType(); 9378 9379 // Build a pointer phi 9380 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9381 Type *ScStValueType = ScalarStartValue->getType(); 9382 PHINode *NewPointerPhi = 9383 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9384 9385 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9386 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH); 9387 9388 // A pointer induction, performed by using a gep 9389 const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout(); 9390 Instruction *InductionLoc = &*State.Builder.GetInsertPoint(); 9391 9392 const SCEV *ScalarStep = IndDesc.getStep(); 9393 SCEVExpander Exp(SE, DL, "induction"); 9394 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9395 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9396 Value *NumUnrolledElems = 9397 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9398 Value *InductionGEP = GetElementPtrInst::Create( 9399 IndDesc.getElementType(), NewPointerPhi, 9400 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9401 InductionLoc); 9402 // Add induction update using an incorrect block temporarily. The phi node 9403 // will be fixed after VPlan execution. Note that at this point the latch 9404 // block cannot be used, as it does not exist yet. 9405 // TODO: Model increment value in VPlan, by turning the recipe into a 9406 // multi-def and a subclass of VPHeaderPHIRecipe. 
9407 NewPointerPhi->addIncoming(InductionGEP, VectorPH); 9408 9409 // Create UF many actual address geps that use the pointer 9410 // phi as base and a vectorized version of the step value 9411 // (<step*0, ..., step*N>) as offset. 9412 for (unsigned Part = 0; Part < State.UF; ++Part) { 9413 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9414 Value *StartOffsetScalar = 9415 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9416 Value *StartOffset = 9417 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9418 // Create a vector of consecutive numbers from zero to VF. 9419 StartOffset = State.Builder.CreateAdd( 9420 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9421 9422 Value *GEP = State.Builder.CreateGEP( 9423 IndDesc.getElementType(), NewPointerPhi, 9424 State.Builder.CreateMul( 9425 StartOffset, 9426 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9427 "vector.gep")); 9428 State.set(this, GEP, Part); 9429 } 9430 } 9431 9432 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9433 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9434 9435 // Fast-math-flags propagate from the original induction instruction. 9436 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9437 if (IndDesc.getInductionBinOp() && 9438 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9439 State.Builder.setFastMathFlags( 9440 IndDesc.getInductionBinOp()->getFastMathFlags()); 9441 9442 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9443 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9444 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9445 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9446 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9447 ScalarIV = 9448 Ty->isIntegerTy() 9449 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9450 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9451 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9452 getStartValue()->getLiveInIRValue(), Step, 9453 IndDesc); 9454 ScalarIV->setName("offset.idx"); 9455 } 9456 if (TruncToTy) { 9457 assert(Step->getType()->isIntegerTy() && 9458 "Truncation requires an integer step"); 9459 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9460 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9461 } 9462 return ScalarIV; 9463 }; 9464 9465 Value *ScalarIV = CreateScalarIV(Step); 9466 if (State.VF.isVector()) { 9467 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9468 return; 9469 } 9470 9471 for (unsigned Part = 0; Part < State.UF; ++Part) { 9472 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9473 Value *EntryPart; 9474 if (Step->getType()->isFloatingPointTy()) { 9475 Value *StartIdx = 9476 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9477 // Floating-point operations inherit FMF via the builder's flags. 
9478 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9479 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9480 ScalarIV, MulOp); 9481 } else { 9482 Value *StartIdx = 9483 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9484 EntryPart = State.Builder.CreateAdd( 9485 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9486 } 9487 State.set(this, EntryPart, Part); 9488 } 9489 } 9490 9491 void VPInterleaveRecipe::execute(VPTransformState &State) { 9492 assert(!State.Instance && "Interleave group being replicated."); 9493 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9494 getStoredValues(), getMask()); 9495 } 9496 9497 void VPReductionRecipe::execute(VPTransformState &State) { 9498 assert(!State.Instance && "Reduction being replicated."); 9499 Value *PrevInChain = State.get(getChainOp(), 0); 9500 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9501 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9502 // Propagate the fast-math flags carried by the underlying instruction. 9503 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9504 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9505 for (unsigned Part = 0; Part < State.UF; ++Part) { 9506 Value *NewVecOp = State.get(getVecOp(), Part); 9507 if (VPValue *Cond = getCondOp()) { 9508 Value *NewCond = State.get(Cond, Part); 9509 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9510 Value *Iden = RdxDesc->getRecurrenceIdentity( 9511 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9512 Value *IdenVec = 9513 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9514 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9515 NewVecOp = Select; 9516 } 9517 Value *NewRed; 9518 Value *NextInChain; 9519 if (IsOrdered) { 9520 if (State.VF.isVector()) 9521 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9522 PrevInChain); 9523 else 9524 NewRed = State.Builder.CreateBinOp( 9525 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9526 NewVecOp); 9527 PrevInChain = NewRed; 9528 } else { 9529 PrevInChain = State.get(getChainOp(), Part); 9530 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9531 } 9532 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9533 NextInChain = 9534 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9535 NewRed, PrevInChain); 9536 } else if (IsOrdered) 9537 NextInChain = NewRed; 9538 else 9539 NextInChain = State.Builder.CreateBinOp( 9540 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9541 PrevInChain); 9542 State.set(this, NextInChain, Part); 9543 } 9544 } 9545 9546 void VPReplicateRecipe::execute(VPTransformState &State) { 9547 if (State.Instance) { // Generate a single instance. 9548 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9549 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9550 IsPredicated, State); 9551 // Insert scalar instance packing it into a vector. 9552 if (AlsoPack && State.VF.isVector()) { 9553 // If we're constructing lane 0, initialize to start from poison. 
9554 if (State.Instance->Lane.isFirstLane()) {
9555 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9556 Value *Poison = PoisonValue::get(
9557 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9558 State.set(this, Poison, State.Instance->Part);
9559 }
9560 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9561 }
9562 return;
9563 }
9564
9565 // Generate scalar instances for all VF lanes of all UF parts, unless the
9566 // instruction is uniform in which case generate only the first lane for each
9567 // of the UF parts.
9568 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9569 assert((!State.VF.isScalable() || IsUniform) &&
9570 "Can't scalarize a scalable vector");
9571 for (unsigned Part = 0; Part < State.UF; ++Part)
9572 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9573 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9574 VPIteration(Part, Lane), IsPredicated,
9575 State);
9576 }
9577
9578 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9579 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9580
9581 // Attempt to issue a wide load.
9582 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9583 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9584
9585 assert((LI || SI) && "Invalid Load/Store instruction");
9586 assert((!SI || StoredValue) && "No stored value provided for widened store");
9587 assert((!LI || !StoredValue) && "Stored value provided for widened load");
9588
9589 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9590
9591 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9592 const Align Alignment = getLoadStoreAlignment(&Ingredient);
9593 bool CreateGatherScatter = !Consecutive;
9594
9595 auto &Builder = State.Builder;
9596 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9597 bool isMaskRequired = getMask();
9598 if (isMaskRequired)
9599 for (unsigned Part = 0; Part < State.UF; ++Part)
9600 BlockInMaskParts[Part] = State.get(getMask(), Part);
9601
9602 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9603 // Calculate the pointer for the specific unroll-part.
9604 GetElementPtrInst *PartPtr = nullptr;
9605
9606 bool InBounds = false;
9607 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9608 InBounds = gep->isInBounds();
9609 if (Reverse) {
9610 // If the address is consecutive but reversed, then the
9611 // wide store needs to start at the last vector element.
9612 // RunTimeVF = VScale * VF.getKnownMinValue()
9613 // For fixed-width vectors, VScale is 1, so RunTimeVF = VF.getKnownMinValue()
9614 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9615 // NumElt = -Part * RunTimeVF
9616 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9617 // LastLane = 1 - RunTimeVF
9618 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9619 PartPtr =
9620 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9621 PartPtr->setIsInBounds(InBounds);
9622 PartPtr = cast<GetElementPtrInst>(
9623 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9624 PartPtr->setIsInBounds(InBounds);
9625 if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9626 BlockInMaskParts[Part] = 9627 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9628 } else { 9629 Value *Increment = 9630 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9631 PartPtr = cast<GetElementPtrInst>( 9632 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9633 PartPtr->setIsInBounds(InBounds); 9634 } 9635 9636 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9637 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9638 }; 9639 9640 // Handle Stores: 9641 if (SI) { 9642 State.setDebugLocFromInst(SI); 9643 9644 for (unsigned Part = 0; Part < State.UF; ++Part) { 9645 Instruction *NewSI = nullptr; 9646 Value *StoredVal = State.get(StoredValue, Part); 9647 if (CreateGatherScatter) { 9648 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9649 Value *VectorGep = State.get(getAddr(), Part); 9650 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9651 MaskPart); 9652 } else { 9653 if (Reverse) { 9654 // If we store to reverse consecutive memory locations, then we need 9655 // to reverse the order of elements in the stored value. 9656 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 9657 // We don't want to update the value in the map as it might be used in 9658 // another expression. So don't call resetVectorValue(StoredVal). 9659 } 9660 auto *VecPtr = 9661 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 9662 if (isMaskRequired) 9663 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 9664 BlockInMaskParts[Part]); 9665 else 9666 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 9667 } 9668 State.addMetadata(NewSI, SI); 9669 } 9670 return; 9671 } 9672 9673 // Handle loads. 9674 assert(LI && "Must have a load instruction"); 9675 State.setDebugLocFromInst(LI); 9676 for (unsigned Part = 0; Part < State.UF; ++Part) { 9677 Value *NewLI; 9678 if (CreateGatherScatter) { 9679 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9680 Value *VectorGep = State.get(getAddr(), Part); 9681 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 9682 nullptr, "wide.masked.gather"); 9683 State.addMetadata(NewLI, LI); 9684 } else { 9685 auto *VecPtr = 9686 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 9687 if (isMaskRequired) 9688 NewLI = Builder.CreateMaskedLoad( 9689 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 9690 PoisonValue::get(DataTy), "wide.masked.load"); 9691 else 9692 NewLI = 9693 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 9694 9695 // Add metadata to the load, but setVectorValue to the reverse shuffle. 9696 State.addMetadata(NewLI, LI); 9697 if (Reverse) 9698 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 9699 } 9700 9701 State.set(getVPSingleValue(), NewLI, Part); 9702 } 9703 } 9704 9705 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9706 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9707 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9708 // for predication. 
9709 static ScalarEpilogueLowering getScalarEpilogueLowering( 9710 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9711 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9712 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9713 LoopVectorizationLegality &LVL) { 9714 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9715 // don't look at hints or options, and don't request a scalar epilogue. 9716 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9717 // LoopAccessInfo (due to code dependency and not being able to reliably get 9718 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9719 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9720 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9721 // back to the old way and vectorize with versioning when forced. See D81345.) 9722 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9723 PGSOQueryType::IRPass) && 9724 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9725 return CM_ScalarEpilogueNotAllowedOptSize; 9726 9727 // 2) If set, obey the directives 9728 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9729 switch (PreferPredicateOverEpilogue) { 9730 case PreferPredicateTy::ScalarEpilogue: 9731 return CM_ScalarEpilogueAllowed; 9732 case PreferPredicateTy::PredicateElseScalarEpilogue: 9733 return CM_ScalarEpilogueNotNeededUsePredicate; 9734 case PreferPredicateTy::PredicateOrDontVectorize: 9735 return CM_ScalarEpilogueNotAllowedUsePredicate; 9736 }; 9737 } 9738 9739 // 3) If set, obey the hints 9740 switch (Hints.getPredicate()) { 9741 case LoopVectorizeHints::FK_Enabled: 9742 return CM_ScalarEpilogueNotNeededUsePredicate; 9743 case LoopVectorizeHints::FK_Disabled: 9744 return CM_ScalarEpilogueAllowed; 9745 }; 9746 9747 // 4) if the TTI hook indicates this is profitable, request predication. 9748 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9749 LVL.getLAI())) 9750 return CM_ScalarEpilogueNotNeededUsePredicate; 9751 9752 return CM_ScalarEpilogueAllowed; 9753 } 9754 9755 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9756 // If Values have been set for this Def return the one relevant for \p Part. 9757 if (hasVectorValue(Def, Part)) 9758 return Data.PerPartOutput[Def][Part]; 9759 9760 if (!hasScalarValue(Def, {Part, 0})) { 9761 Value *IRV = Def->getLiveInIRValue(); 9762 Value *B = ILV->getBroadcastInstrs(IRV); 9763 set(Def, B, Part); 9764 return B; 9765 } 9766 9767 Value *ScalarValue = get(Def, {Part, 0}); 9768 // If we aren't vectorizing, we can just copy the scalar map values over 9769 // to the vector map. 9770 if (VF.isScalar()) { 9771 set(Def, ScalarValue, Part); 9772 return ScalarValue; 9773 } 9774 9775 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9776 bool IsUniform = RepR && RepR->isUniform(); 9777 9778 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9779 // Check if there is a scalar value for the selected lane. 9780 if (!hasScalarValue(Def, {Part, LastLane})) { 9781 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
    assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
            isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, ORE);

  // Get user vectorization factor.
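  // As an illustration (hypothetical pragma, not taken from this file): a
  // loop annotated with
  //   #pragma clang loop vectorize_width(4)
  // makes Hints.getWidth() return a fixed ElementCount of 4, which is passed
  // to the planner below as the user-requested VF.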
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
    return false;

  VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width,
                           VF.Width, 1, LVL, &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(VF.Width, 1, BestPlan, LB, DT, false);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop contains such conversions there will be a
// performance penalty from the conversion overhead and the change in the
// vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
                                       VectorizationFactor &VF,
                                       Optional<unsigned> VScale, Loop *L,
                                       ScalarEvolution &SE) {
  InstructionCost CheckCost = Checks.getCost();
  if (!CheckCost.isValid())
    return false;

  // When only interleaving, the scalar and vector cost will be equal, which in
  // turn would lead to a divide by 0. Fall back to the hard threshold.
  if (VF.Width.isScalar()) {
    if (CheckCost > VectorizeMemoryCheckThreshold) {
      LLVM_DEBUG(
          dbgs()
          << "LV: Interleaving only is not profitable due to runtime checks\n");
      return false;
    }
    return true;
  }

  // The scalar cost should only be 0 when vectorizing with a user specified
  // VF/IC. In those cases, runtime checks should always be generated.
  double ScalarC = *VF.ScalarCost.getValue();
  if (ScalarC == 0)
    return true;

  // First, compute the minimum iteration count required so that the vector
  // loop outperforms the scalar loop.
  //
  // The total cost of the scalar loop is
  //   ScalarC * TC
  // where
  //   * TC is the actual trip count of the loop.
  //   * ScalarC is the cost of a single scalar iteration.
  //
  // The total cost of the vector loop is
  //   RtC + VecC * (TC / VF) + EpiC
  // where
  //   * RtC is the cost of the generated runtime checks
  //   * VecC is the cost of a single vector iteration.
  //   * TC is the actual trip count of the loop
  //   * VF is the vectorization factor
  //   * EpiC is the cost of the generated epilogue, including the cost
  //     of the remaining scalar operations.
  //
  // Vectorization is profitable once the total vector cost is less than the
  // total scalar cost:
  //   RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
  //
  // Now we can compute the minimum required trip count TC as
  //   (RtC + EpiC) / (ScalarC - (VecC / VF)) < TC
  //
  // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
  // the computations are performed on doubles, not integers, and the result
  // is rounded up, hence we get an upper estimate of the TC.
  unsigned IntVF = VF.Width.getKnownMinValue();
  if (VF.Width.isScalable()) {
    unsigned AssumedMinimumVscale = 1;
    if (VScale)
      AssumedMinimumVscale = *VScale;
    IntVF *= AssumedMinimumVscale;
  }
  double VecCOverVF = double(*VF.Cost.getValue()) / IntVF;
  double RtC = *CheckCost.getValue();
  double MinTC1 = RtC / (ScalarC - VecCOverVF);

  // Second, compute a minimum iteration count so that the cost of the
  // runtime checks is only a fraction of the total scalar loop cost. This
  // adds a loop-dependent bound on the overhead incurred if the runtime
  // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
  // * TC. To bound the runtime check to be a fraction 1/X of the scalar
  // cost, compute
  //   RtC < ScalarC * TC * (1 / X)  ==>  RtC * X / ScalarC < TC
  double MinTC2 = RtC * 10 / ScalarC;

  // Now pick the larger minimum. If it is not a multiple of VF, choose the
  // next closest multiple of VF. This should partly compensate for ignoring
  // the epilogue cost.
  uint64_t MinTC = std::ceil(std::max(MinTC1, MinTC2));
  VF.MinProfitableTripCount = ElementCount::getFixed(alignTo(MinTC, IntVF));

  LLVM_DEBUG(
      dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
             << VF.MinProfitableTripCount << "\n");

  // Skip vectorization if the expected trip count is less than the minimum
  // required trip count.
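  // Hypothetical example (illustrative numbers only): with ScalarC = 4,
  // VecC = 6, a fixed VF of 4 and RtC = 35:
  //   VecCOverVF = 6 / 4          = 1.5
  //   MinTC1     = 35 / (4 - 1.5) = 14
  //   MinTC2     = 35 * 10 / 4    = 87.5
  // so MinTC = ceil(max(14, 87.5)) = 88, already a multiple of VF, and
  // MinProfitableTripCount = 88. The check below rejects vectorization if the
  // expected trip count is known to be smaller than that.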
10015 if (auto ExpectedTC = getSmallBestKnownTC(SE, L)) { 10016 if (ElementCount::isKnownLT(ElementCount::getFixed(*ExpectedTC), 10017 VF.MinProfitableTripCount)) { 10018 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected " 10019 "trip count < minimum profitable VF (" 10020 << *ExpectedTC << " < " << VF.MinProfitableTripCount 10021 << ")\n"); 10022 10023 return false; 10024 } 10025 } 10026 return true; 10027 } 10028 10029 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10030 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10031 !EnableLoopInterleaving), 10032 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10033 !EnableLoopVectorization) {} 10034 10035 bool LoopVectorizePass::processLoop(Loop *L) { 10036 assert((EnableVPlanNativePath || L->isInnermost()) && 10037 "VPlan-native path is not enabled. Only process inner loops."); 10038 10039 #ifndef NDEBUG 10040 const std::string DebugLocStr = getDebugLocString(L); 10041 #endif /* NDEBUG */ 10042 10043 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10044 << L->getHeader()->getParent()->getName() << "' from " 10045 << DebugLocStr << "\n"); 10046 10047 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10048 10049 LLVM_DEBUG( 10050 dbgs() << "LV: Loop hints:" 10051 << " force=" 10052 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10053 ? "disabled" 10054 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10055 ? "enabled" 10056 : "?")) 10057 << " width=" << Hints.getWidth() 10058 << " interleave=" << Hints.getInterleave() << "\n"); 10059 10060 // Function containing loop 10061 Function *F = L->getHeader()->getParent(); 10062 10063 // Looking at the diagnostic output is the only way to determine if a loop 10064 // was vectorized (other than looking at the IR or machine code), so it 10065 // is important to generate an optimization remark for each loop. Most of 10066 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10067 // generated as OptimizationRemark and OptimizationRemarkMissed are 10068 // less verbose reporting vectorized loops and unvectorized loops that may 10069 // benefit from vectorization, respectively. 10070 10071 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10072 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10073 return false; 10074 } 10075 10076 PredicatedScalarEvolution PSE(*SE, *L); 10077 10078 // Check if it is legal to vectorize the loop. 10079 LoopVectorizationRequirements Requirements; 10080 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10081 &Requirements, &Hints, DB, AC, BFI, PSI); 10082 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10083 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10084 Hints.emitRemarkWithHints(); 10085 return false; 10086 } 10087 10088 // Check the function attributes and profiles to find out if this function 10089 // should be optimized for size. 10090 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10091 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10092 10093 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10094 // here. They may require CFG and instruction level transformations before 10095 // even evaluating whether vectorization is profitable. Since we cannot modify 10096 // the incoming IR, we need to build VPlan upfront in the vectorization 10097 // pipeline. 
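  // Illustrative example (hypothetical source, not from this file): with
  // -enable-vplan-native-path and an explicit vectorization hint on the outer
  // loop of
  //   for (i = 0; i < n; ++i)
  //     for (j = 0; j < m; ++j)
  //       A[i][j] = B[j][i];
  // the outer loop reaches this point as a non-innermost loop and is handed
  // to the VPlan-native path below instead of the inner-loop path.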
10098 if (!L->isInnermost()) 10099 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10100 ORE, BFI, PSI, Hints, Requirements); 10101 10102 assert(L->isInnermost() && "Inner loop expected."); 10103 10104 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10105 // count by optimizing for size, to minimize overheads. 10106 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10107 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10108 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10109 << "This loop is worth vectorizing only if no scalar " 10110 << "iteration overheads are incurred."); 10111 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10112 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10113 else { 10114 LLVM_DEBUG(dbgs() << "\n"); 10115 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10116 } 10117 } 10118 10119 // Check the function attributes to see if implicit floats are allowed. 10120 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10121 // an integer loop and the vector instructions selected are purely integer 10122 // vector instructions? 10123 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10124 reportVectorizationFailure( 10125 "Can't vectorize when the NoImplicitFloat attribute is used", 10126 "loop not vectorized due to NoImplicitFloat attribute", 10127 "NoImplicitFloat", ORE, L); 10128 Hints.emitRemarkWithHints(); 10129 return false; 10130 } 10131 10132 // Check if the target supports potentially unsafe FP vectorization. 10133 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10134 // for the target we're vectorizing for, to make sure none of the 10135 // additional fp-math flags can help. 10136 if (Hints.isPotentiallyUnsafe() && 10137 TTI->isFPVectorizationPotentiallyUnsafe()) { 10138 reportVectorizationFailure( 10139 "Potentially unsafe FP op prevents vectorization", 10140 "loop not vectorized due to unsafe FP support.", 10141 "UnsafeFP", ORE, L); 10142 Hints.emitRemarkWithHints(); 10143 return false; 10144 } 10145 10146 bool AllowOrderedReductions; 10147 // If the flag is set, use that instead and override the TTI behaviour. 10148 if (ForceOrderedReductions.getNumOccurrences() > 0) 10149 AllowOrderedReductions = ForceOrderedReductions; 10150 else 10151 AllowOrderedReductions = TTI->enableOrderedReductions(); 10152 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10153 ORE->emit([&]() { 10154 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10155 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10156 ExactFPMathInst->getDebugLoc(), 10157 ExactFPMathInst->getParent()) 10158 << "loop not vectorized: cannot prove it is safe to reorder " 10159 "floating-point operations"; 10160 }); 10161 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10162 "reorder floating-point operations\n"); 10163 Hints.emitRemarkWithHints(); 10164 return false; 10165 } 10166 10167 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10168 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10169 10170 // If an override option has been passed in for interleaved accesses, use it. 10171 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10172 UseInterleaved = EnableInterleavedMemAccesses; 10173 10174 // Analyze interleaved memory accesses. 
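  // For illustration (hypothetical source loop, not from this file): accesses
  // such as
  //   for (i = 0; i < n; ++i)
  //     Sum += A[2 * i] + A[2 * i + 1];
  // form an interleave group with factor 2, which can be vectorized as one
  // wide load plus shuffles instead of two strided accesses.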
10175 if (UseInterleaved) { 10176 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10177 } 10178 10179 // Use the cost model. 10180 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10181 F, &Hints, IAI); 10182 CM.collectValuesToIgnore(); 10183 CM.collectElementTypesForWidening(); 10184 10185 // Use the planner for vectorization. 10186 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, ORE); 10187 10188 // Get user vectorization factor and interleave count. 10189 ElementCount UserVF = Hints.getWidth(); 10190 unsigned UserIC = Hints.getInterleave(); 10191 10192 // Plan how to best vectorize, return the best VF and its cost. 10193 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10194 10195 VectorizationFactor VF = VectorizationFactor::Disabled(); 10196 unsigned IC = 1; 10197 10198 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI, 10199 F->getParent()->getDataLayout()); 10200 if (MaybeVF) { 10201 VF = *MaybeVF; 10202 // Select the interleave count. 10203 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10204 10205 unsigned SelectedIC = std::max(IC, UserIC); 10206 // Optimistically generate runtime checks if they are needed. Drop them if 10207 // they turn out to not be profitable. 10208 if (VF.Width.isVector() || SelectedIC > 1) 10209 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC); 10210 10211 // Check if it is profitable to vectorize with runtime checks. 10212 bool ForceVectorization = 10213 Hints.getForce() == LoopVectorizeHints::FK_Enabled; 10214 if (!ForceVectorization && 10215 !areRuntimeChecksProfitable(Checks, VF, CM.getVScaleForTuning(), L, 10216 *PSE.getSE())) { 10217 ORE->emit([&]() { 10218 return OptimizationRemarkAnalysisAliasing( 10219 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(), 10220 L->getHeader()) 10221 << "loop not vectorized: cannot prove it is safe to reorder " 10222 "memory operations"; 10223 }); 10224 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 10225 Hints.emitRemarkWithHints(); 10226 return false; 10227 } 10228 } 10229 10230 // Identify the diagnostic messages that should be produced. 10231 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10232 bool VectorizeLoop = true, InterleaveLoop = true; 10233 if (VF.Width.isScalar()) { 10234 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10235 VecDiagMsg = std::make_pair( 10236 "VectorizationNotBeneficial", 10237 "the cost-model indicates that vectorization is not beneficial"); 10238 VectorizeLoop = false; 10239 } 10240 10241 if (!MaybeVF && UserIC > 1) { 10242 // Tell the user interleaving was avoided up-front, despite being explicitly 10243 // requested. 10244 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10245 "interleaving should be avoided up front\n"); 10246 IntDiagMsg = std::make_pair( 10247 "InterleavingAvoided", 10248 "Ignoring UserIC, because interleaving was avoided up front"); 10249 InterleaveLoop = false; 10250 } else if (IC == 1 && UserIC <= 1) { 10251 // Tell the user interleaving is not beneficial. 
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not profitable to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);

      VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
      LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT, false);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *profitable* to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
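      // Sketch of the resulting structure when epilogue vectorization kicks
      // in (widths are hypothetical): a main vector loop with, say, VF = 8
      // handles most iterations, a second vector loop with VF = 4 covers the
      // bulk of the remainder, and a scalar loop finishes the last few
      // iterations and also serves as the fall-back when the runtime checks
      // fail.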
10336 VectorizationFactor EpilogueVF = 10337 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10338 if (EpilogueVF.Width.isVector()) { 10339 10340 // The first pass vectorizes the main loop and creates a scalar epilogue 10341 // to be vectorized by executing the plan (potentially with a different 10342 // factor) again shortly afterwards. 10343 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10344 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10345 EPI, &LVL, &CM, BFI, PSI, Checks); 10346 10347 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10348 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10349 DT, true); 10350 ++LoopsVectorized; 10351 10352 // Second pass vectorizes the epilogue and adjusts the control flow 10353 // edges from the first pass. 10354 EPI.MainLoopVF = EPI.EpilogueVF; 10355 EPI.MainLoopUF = EPI.EpilogueUF; 10356 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10357 ORE, EPI, &LVL, &CM, BFI, PSI, 10358 Checks); 10359 10360 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10361 VPRegionBlock *VectorLoop = BestEpiPlan.getVectorLoopRegion(); 10362 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock(); 10363 Header->setName("vec.epilog.vector.body"); 10364 10365 // Ensure that the start values for any VPReductionPHIRecipes are 10366 // updated before vectorising the epilogue loop. 10367 for (VPRecipeBase &R : Header->phis()) { 10368 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10369 if (auto *Resume = MainILV.getReductionResumeValue( 10370 ReductionPhi->getRecurrenceDescriptor())) { 10371 VPValue *StartVal = BestEpiPlan.getOrAddExternalDef(Resume); 10372 ReductionPhi->setOperand(0, StartVal); 10373 } 10374 } 10375 } 10376 10377 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10378 DT, true); 10379 ++LoopsEpilogueVectorized; 10380 10381 if (!MainILV.areSafetyChecksAdded()) 10382 DisableRuntimeUnroll = true; 10383 } else { 10384 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 10385 VF.MinProfitableTripCount, IC, &LVL, &CM, BFI, 10386 PSI, Checks); 10387 10388 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10389 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false); 10390 ++LoopsVectorized; 10391 10392 // Add metadata to disable runtime unrolling a scalar loop when there 10393 // are no runtime checks about strides and memory. A scalar loop that is 10394 // rarely used is not worth unrolling. 10395 if (!LB.areSafetyChecksAdded()) 10396 DisableRuntimeUnroll = true; 10397 } 10398 // Report the vectorization decision. 10399 ORE->emit([&]() { 10400 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10401 L->getHeader()) 10402 << "vectorized loop (vectorization width: " 10403 << NV("VectorizationFactor", VF.Width) 10404 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10405 }); 10406 } 10407 10408 if (ORE->allowExtraAnalysis(LV_NAME)) 10409 checkMixedPrecision(L, ORE); 10410 } 10411 10412 Optional<MDNode *> RemainderLoopID = 10413 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10414 LLVMLoopVectorizeFollowupEpilogue}); 10415 if (RemainderLoopID) { 10416 L->setLoopID(RemainderLoopID.value()); 10417 } else { 10418 if (DisableRuntimeUnroll) 10419 AddRuntimeUnrollDisableMetaData(L); 10420 10421 // Mark the loop as already vectorized to avoid vectorizing again. 
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  // There are no loops in the function. Return before computing other expensive
  // analyses.
  if (LI.empty())
    return PreservedAnalyses::all();
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/Dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}
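// For reference (assuming the default pass options), the pipeline text
// printed above renders as something like:
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>
// with the "no-" prefix dropped from an entry when the corresponding
// *OnlyWhenForced flag is set.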