//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
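//
// As an illustrative sketch only (this exact code is not part of the pass),
// the transformation described above turns a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + C[i];
//
// into, for a vectorization factor of 4, a loop whose induction variable
// steps by the vector width, plus a scalar remainder (epilogue) loop:
//
//   for (i = 0; i + 4 <= n; i += 4) {
//     A[i+0] = B[i+0] + C[i+0];   // these four scalar adds become
//     A[i+1] = B[i+1] + C[i+1];   // a single vector add
//     A[i+2] = B[i+2] + C[i+2];
//     A[i+3] = B[i+3] + C[i+3];
//   }
//   for (; i < n; ++i)            // scalar epilogue for the remainder
//     A[i] = B[i] + C[i];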
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefers tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

Mostly " 278 "useful for getting consistent testing.")); 279 280 static cl::opt<bool> ForceTargetSupportsScalableVectors( 281 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 282 cl::desc( 283 "Pretend that scalable vectors are supported, even if the target does " 284 "not support them. This flag should only be used for testing.")); 285 286 static cl::opt<unsigned> SmallLoopCost( 287 "small-loop-cost", cl::init(20), cl::Hidden, 288 cl::desc( 289 "The cost of a loop that is considered 'small' by the interleaver.")); 290 291 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 292 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 293 cl::desc("Enable the use of the block frequency analysis to access PGO " 294 "heuristics minimizing code growth in cold regions and being more " 295 "aggressive in hot regions.")); 296 297 // Runtime interleave loops for load/store throughput. 298 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 299 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 300 cl::desc( 301 "Enable runtime interleaving until load/store ports are saturated")); 302 303 /// Interleave small loops with scalar reductions. 304 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 305 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 306 cl::desc("Enable interleaving for loops with small iteration counts that " 307 "contain scalar reductions to expose ILP.")); 308 309 /// The number of stores in a loop that are allowed to need predication. 310 static cl::opt<unsigned> NumberOfStoresToPredicate( 311 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 312 cl::desc("Max number of stores to be predicated behind an if.")); 313 314 static cl::opt<bool> EnableIndVarRegisterHeur( 315 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 316 cl::desc("Count the induction variable only once when interleaving")); 317 318 static cl::opt<bool> EnableCondStoresVectorization( 319 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 320 cl::desc("Enable if predication of stores during vectorization.")); 321 322 static cl::opt<unsigned> MaxNestedScalarReductionIC( 323 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 324 cl::desc("The maximum interleave count to use when interleaving a scalar " 325 "reduction in a nested loop.")); 326 327 static cl::opt<bool> 328 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 329 cl::Hidden, 330 cl::desc("Prefer in-loop vector reductions, " 331 "overriding the targets preference.")); 332 333 static cl::opt<bool> ForceOrderedReductions( 334 "force-ordered-reductions", cl::init(false), cl::Hidden, 335 cl::desc("Enable the vectorisation of loops with in-order (strict) " 336 "FP reductions")); 337 338 static cl::opt<bool> PreferPredicatedReductionSelect( 339 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 340 cl::desc( 341 "Prefer predicating a reduction operation over an after loop select.")); 342 343 cl::opt<bool> EnableVPlanNativePath( 344 "enable-vplan-native-path", cl::init(false), cl::Hidden, 345 cl::desc("Enable VPlan-native vectorization path with " 346 "support for outer loop vectorization.")); 347 348 // FIXME: Remove this switch once we have divergence analysis. Currently we 349 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
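
// Illustrative examples only (the exact results depend on the module's
// DataLayout, so treat these as assumptions rather than guarantees):
//   hasIrregularType(Type::getInt32Ty(Ctx), DL)    -> false, since i32 has a
//                                                     32-bit value size and a
//                                                     32-bit allocation size.
//   hasIrregularType(Type::getX86_FP80Ty(Ctx), DL) -> typically true, since
//                                                     x86_fp80 holds 80 bits
//                                                     but is usually allocated
//                                                     in 96 or 128 bits, so an
//                                                     array of it has padding.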

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p Ptr using the debug location in
  /// \p V. If \p Ptr is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g., epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to set up the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};
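
// A rough sketch of the structure the two strategy classes below cooperate to
// build (simplified, and inferred from the surrounding comments rather than a
// precise description of the generated CFG):
//   1. iteration-count and runtime safety checks for the main loop
//   2. main vector loop, using EPI.MainLoopVF / EPI.MainLoopUF
//   3. a second iteration-count check for the remaining iterations
//   4. epilogue vector loop, using EPI.EpilogueVF / EPI.EpilogueUF
//   5. scalar remainder loop for any iterations that are still left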

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
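
// Illustrative behaviour of createStepForVF (assumed examples, not exercised
// by this file):
//   createStepForVF(B, i64, ElementCount::getFixed(4), 2)
//     -> the constant i64 8
//   createStepForVF(B, i64, ElementCount::getScalable(4), 2)
//     -> a runtime value equal to vscale * 8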

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
1349 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1350 if (VF.isScalar()) 1351 return true; 1352 1353 // Cost model is not run in the VPlan-native path - return conservative 1354 // result until this changes. 1355 if (EnableVPlanNativePath) 1356 return false; 1357 1358 auto UniformsPerVF = Uniforms.find(VF); 1359 assert(UniformsPerVF != Uniforms.end() && 1360 "VF not yet analyzed for uniformity"); 1361 return UniformsPerVF->second.count(I); 1362 } 1363 1364 /// Returns true if \p I is known to be scalar after vectorization. 1365 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1366 if (VF.isScalar()) 1367 return true; 1368 1369 // Cost model is not run in the VPlan-native path - return conservative 1370 // result until this changes. 1371 if (EnableVPlanNativePath) 1372 return false; 1373 1374 auto ScalarsPerVF = Scalars.find(VF); 1375 assert(ScalarsPerVF != Scalars.end() && 1376 "Scalar values are not calculated for VF"); 1377 return ScalarsPerVF->second.count(I); 1378 } 1379 1380 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1381 /// for vectorization factor \p VF. 1382 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1383 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1384 !isProfitableToScalarize(I, VF) && 1385 !isScalarAfterVectorization(I, VF); 1386 } 1387 1388 /// Decision that was taken during cost calculation for memory instruction. 1389 enum InstWidening { 1390 CM_Unknown, 1391 CM_Widen, // For consecutive accesses with stride +1. 1392 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1393 CM_Interleave, 1394 CM_GatherScatter, 1395 CM_Scalarize 1396 }; 1397 1398 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1399 /// instruction \p I and vector width \p VF. 1400 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1401 InstructionCost Cost) { 1402 assert(VF.isVector() && "Expected VF >=2"); 1403 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1404 } 1405 1406 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1407 /// interleaving group \p Grp and vector width \p VF. 1408 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1409 ElementCount VF, InstWidening W, 1410 InstructionCost Cost) { 1411 assert(VF.isVector() && "Expected VF >=2"); 1412 /// Broadcast this decicion to all instructions inside the group. 1413 /// But the cost will be assigned to one instruction only. 1414 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1415 if (auto *I = Grp->getMember(i)) { 1416 if (Grp->getInsertPos() == I) 1417 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1418 else 1419 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1420 } 1421 } 1422 } 1423 1424 /// Return the cost model decision for the given instruction \p I and vector 1425 /// width \p VF. Return CM_Unknown if this instruction did not pass 1426 /// through the cost modeling. 1427 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1428 assert(VF.isVector() && "Expected VF to be a vector VF"); 1429 // Cost model is not run in the VPlan-native path - return conservative 1430 // result until this changes. 
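// (Illustrative note: CM_GatherScatter is presumably the safe placeholder
// here because a gather/scatter makes no assumption about the layout of the
// access; the real decision is simply not computed on this path.)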
1431 if (EnableVPlanNativePath) 1432 return CM_GatherScatter; 1433 1434 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1435 auto Itr = WideningDecisions.find(InstOnVF); 1436 if (Itr == WideningDecisions.end()) 1437 return CM_Unknown; 1438 return Itr->second.first; 1439 } 1440 1441 /// Return the vectorization cost for the given instruction \p I and vector 1442 /// width \p VF. 1443 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1444 assert(VF.isVector() && "Expected VF >=2"); 1445 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1446 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1447 "The cost is not calculated"); 1448 return WideningDecisions[InstOnVF].second; 1449 } 1450 1451 /// Return True if instruction \p I is an optimizable truncate whose operand 1452 /// is an induction variable. Such a truncate will be removed by adding a new 1453 /// induction variable with the destination type. 1454 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1455 // If the instruction is not a truncate, return false. 1456 auto *Trunc = dyn_cast<TruncInst>(I); 1457 if (!Trunc) 1458 return false; 1459 1460 // Get the source and destination types of the truncate. 1461 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1462 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1463 1464 // If the truncate is free for the given types, return false. Replacing a 1465 // free truncate with an induction variable would add an induction variable 1466 // update instruction to each iteration of the loop. We exclude from this 1467 // check the primary induction variable since it will need an update 1468 // instruction regardless. 1469 Value *Op = Trunc->getOperand(0); 1470 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1471 return false; 1472 1473 // If the truncated value is not an induction variable, return false. 1474 return Legal->isInductionPhi(Op); 1475 } 1476 1477 /// Collects the instructions to scalarize for each predicated instruction in 1478 /// the loop. 1479 void collectInstsToScalarize(ElementCount VF); 1480 1481 /// Collect Uniform and Scalar values for the given \p VF. 1482 /// The sets depend on CM decision for Load/Store instructions 1483 /// that may be vectorized as interleave, gather-scatter or scalarized. 1484 void collectUniformsAndScalars(ElementCount VF) { 1485 // Do the analysis once. 1486 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1487 return; 1488 setCostBasedWideningDecision(VF); 1489 collectLoopUniforms(VF); 1490 collectLoopScalars(VF); 1491 } 1492 1493 /// Returns true if the target machine supports masked store operation 1494 /// for the given \p DataType and kind of access to \p Ptr. 1495 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1496 return Legal->isConsecutivePtr(DataType, Ptr) && 1497 TTI.isLegalMaskedStore(DataType, Alignment); 1498 } 1499 1500 /// Returns true if the target machine supports masked load operation 1501 /// for the given \p DataType and kind of access to \p Ptr. 1502 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1503 return Legal->isConsecutivePtr(DataType, Ptr) && 1504 TTI.isLegalMaskedLoad(DataType, Alignment); 1505 } 1506 1507 /// Returns true if the target machine can represent \p V as a masked gather 1508 /// or scatter operation. 
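/// An illustrative case (not from this file) is an indexed access that has
/// no consecutive or interleaved form:
///
///   for (i = 0; i < n; ++i)
///     s += A[Idx[i]];   // may stay vectorized as a masked gather if the
///                       // target reports it legal for the element type.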
1509 bool isLegalGatherOrScatter(Value *V, 1510 ElementCount VF = ElementCount::getFixed(1)) { 1511 bool LI = isa<LoadInst>(V); 1512 bool SI = isa<StoreInst>(V); 1513 if (!LI && !SI) 1514 return false; 1515 auto *Ty = getLoadStoreType(V); 1516 Align Align = getLoadStoreAlignment(V); 1517 if (VF.isVector()) 1518 Ty = VectorType::get(Ty, VF); 1519 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1520 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1521 } 1522 1523 /// Returns true if the target machine supports all of the reduction 1524 /// variables found for the given VF. 1525 bool canVectorizeReductions(ElementCount VF) const { 1526 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1527 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1528 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1529 })); 1530 } 1531 1532 /// Returns true if \p I is an instruction that will be scalarized with 1533 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1534 /// instructions include conditional stores and instructions that may divide 1535 /// by zero. 1536 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1537 1538 // Returns true if \p I is an instruction that will be predicated either 1539 // through scalar predication or masked load/store or masked gather/scatter. 1540 // \p VF is the vectorization factor that will be used to vectorize \p I. 1541 // Superset of instructions that return true for isScalarWithPredication. 1542 bool isPredicatedInst(Instruction *I, ElementCount VF, 1543 bool IsKnownUniform = false) { 1544 // When we know the load is uniform and the original scalar loop was not 1545 // predicated we don't need to mark it as a predicated instruction. Any 1546 // vectorised blocks created when tail-folding are something artificial we 1547 // have introduced and we know there is always at least one active lane. 1548 // That's why we call Legal->blockNeedsPredication here because it doesn't 1549 // query tail-folding. 1550 if (IsKnownUniform && isa<LoadInst>(I) && 1551 !Legal->blockNeedsPredication(I->getParent())) 1552 return false; 1553 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1554 return false; 1555 // Loads and stores that need some form of masked operation are predicated 1556 // instructions. 1557 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1558 return Legal->isMaskRequired(I); 1559 return isScalarWithPredication(I, VF); 1560 } 1561 1562 /// Returns true if \p I is a memory instruction with consecutive memory 1563 /// access that can be widened. 1564 bool 1565 memoryInstructionCanBeWidened(Instruction *I, 1566 ElementCount VF = ElementCount::getFixed(1)); 1567 1568 /// Returns true if \p I is a memory instruction in an interleaved-group 1569 /// of memory accesses that can be vectorized with wide vector loads/stores 1570 /// and shuffles. 1571 bool 1572 interleavedAccessCanBeWidened(Instruction *I, 1573 ElementCount VF = ElementCount::getFixed(1)); 1574 1575 /// Check if \p Instr belongs to any interleaved access group. 1576 bool isAccessInterleaved(Instruction *Instr) { 1577 return InterleaveInfo.isInterleaved(Instr); 1578 } 1579 1580 /// Get the interleaved access group that \p Instr belongs to. 1581 const InterleaveGroup<Instruction> * 1582 getInterleavedAccessGroup(Instruction *Instr) { 1583 return InterleaveInfo.getInterleaveGroup(Instr); 1584 } 1585 1586 /// Returns true if we're required to use a scalar epilogue for at least 1587 /// the final iteration of the original loop. 
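/// One illustrative reason (see the InterleaveInfo query below) is an
/// interleaved group with a gap at its end:
///
///   for (i = 0; i < n; i += 2)
///     use(A[i]);        // A[i+1] is never accessed (a "gap")
///
/// A wide load for the final vector iteration could read past the end of A,
/// so that iteration is kept in a scalar epilogue instead.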
1588 bool requiresScalarEpilogue(ElementCount VF) const { 1589 if (!isScalarEpilogueAllowed()) 1590 return false; 1591 // If we might exit from anywhere but the latch, must run the exiting 1592 // iteration in scalar form. 1593 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1594 return true; 1595 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1596 } 1597 1598 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1599 /// loop hint annotation. 1600 bool isScalarEpilogueAllowed() const { 1601 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1602 } 1603 1604 /// Returns true if all loop blocks should be masked to fold tail loop. 1605 bool foldTailByMasking() const { return FoldTailByMasking; } 1606 1607 /// Returns true if the instructions in this block requires predication 1608 /// for any reason, e.g. because tail folding now requires a predicate 1609 /// or because the block in the original loop was predicated. 1610 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const { 1611 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1612 } 1613 1614 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1615 /// nodes to the chain of instructions representing the reductions. Uses a 1616 /// MapVector to ensure deterministic iteration order. 1617 using ReductionChainMap = 1618 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1619 1620 /// Return the chain of instructions representing an inloop reduction. 1621 const ReductionChainMap &getInLoopReductionChains() const { 1622 return InLoopReductionChains; 1623 } 1624 1625 /// Returns true if the Phi is part of an inloop reduction. 1626 bool isInLoopReduction(PHINode *Phi) const { 1627 return InLoopReductionChains.count(Phi); 1628 } 1629 1630 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1631 /// with factor VF. Return the cost of the instruction, including 1632 /// scalarization overhead if it's needed. 1633 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1634 1635 /// Estimate cost of a call instruction CI if it were vectorized with factor 1636 /// VF. Return the cost of the instruction, including scalarization overhead 1637 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1638 /// scalarized - 1639 /// i.e. either vector version isn't available, or is too expensive. 1640 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1641 bool &NeedToScalarize) const; 1642 1643 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1644 /// that of B. 1645 bool isMoreProfitable(const VectorizationFactor &A, 1646 const VectorizationFactor &B) const; 1647 1648 /// Invalidates decisions already taken by the cost model. 1649 void invalidateCostModelingDecisions() { 1650 WideningDecisions.clear(); 1651 Uniforms.clear(); 1652 Scalars.clear(); 1653 } 1654 1655 private: 1656 unsigned NumPredStores = 0; 1657 1658 /// Convenience function that returns the value of vscale_range iff 1659 /// vscale_range.min == vscale_range.max or otherwise returns the value 1660 /// returned by the corresponding TLI method. 1661 Optional<unsigned> getVScaleForTuning() const; 1662 1663 /// \return An upper bound for the vectorization factors for both 1664 /// fixed and scalable vectorization, where the minimum-known number of 1665 /// elements is a power-of-2 larger than zero. 
If scalable vectorization is
1666 /// disabled or unsupported, then the scalable part will be equal to
1667 /// ElementCount::getScalable(0).
1668 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1669 ElementCount UserVF,
1670 bool FoldTailByMasking);
1671
1672 /// \return the maximized element count based on the target's vector
1673 /// registers and the loop trip-count, but limited to a maximum safe VF.
1674 /// This is a helper function of computeFeasibleMaxVF.
1675 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1676 /// issue that occurred on one of the buildbots which cannot be reproduced
1677 /// without having access to the proprietary compiler (see comments on
1678 /// D98509). The issue is currently under investigation and this workaround
1679 /// will be removed as soon as possible.
1680 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1681 unsigned SmallestType,
1682 unsigned WidestType,
1683 const ElementCount &MaxSafeVF,
1684 bool FoldTailByMasking);
1685
1686 /// \return the maximum legal scalable VF, based on the safe max number
1687 /// of elements.
1688 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1689
1690 /// The vectorization cost is a combination of the cost itself and a boolean
1691 /// indicating whether any of the contributing operations will actually
1692 /// operate on vector values after type legalization in the backend. If this
1693 /// latter value is false, then all operations will be scalarized (i.e. no
1694 /// vectorization has actually taken place).
1695 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1696
1697 /// Returns the expected execution cost. The unit of the cost does
1698 /// not matter because we use the 'cost' units to compare different
1699 /// vector widths. The cost that is returned is *not* normalized by
1700 /// the factor width. If \p Invalid is not nullptr, this function
1701 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1702 /// each instruction that has an Invalid cost for the given VF.
1703 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1704 VectorizationCostTy
1705 expectedCost(ElementCount VF,
1706 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1707
1708 /// Returns the execution time cost of an instruction for a given vector
1709 /// width. Vector width of one means scalar.
1710 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1711
1712 /// The cost-computation logic from getInstructionCost which provides
1713 /// the vector type as an output parameter.
1714 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1715 Type *&VectorTy);
1716
1717 /// Return the cost of instructions in an inloop reduction pattern, if I is
1718 /// part of that pattern.
1719 Optional<InstructionCost>
1720 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1721 TTI::TargetCostKind CostKind);
1722
1723 /// Calculate vectorization cost of memory instruction \p I.
1724 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1725
1726 /// The cost computation for a scalarized memory instruction.
1727 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1728
1729 /// The cost computation for an interleaving group of memory instructions.
1730 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1731
1732 /// The cost computation for a Gather/Scatter instruction.
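/// Illustrative example (not from this file): a strided access such as
///
///   for (i = 0; i < n; ++i)
///     s += A[3 * i];
///
/// has no single wide load, but can stay vectorized as a gather; this
/// routine estimates what the target charges for that form.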
1733 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1734
1735 /// The cost computation for widening instruction \p I with consecutive
1736 /// memory access.
1737 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1738
1739 /// The cost calculation for Load/Store instruction \p I with a uniform pointer:
1740 /// Load: scalar load + broadcast.
1741 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1742 /// element)
1743 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1744
1745 /// Estimate the overhead of scalarizing an instruction. This is a
1746 /// convenience wrapper for the type-based getScalarizationOverhead API.
1747 InstructionCost getScalarizationOverhead(Instruction *I,
1748 ElementCount VF) const;
1749
1750 /// Returns whether the instruction is a load or store that will be emitted
1751 /// as a vector operation.
1752 bool isConsecutiveLoadOrStore(Instruction *I);
1753
1754 /// Returns true if an artificially high cost for emulated masked memrefs
1755 /// should be used.
1756 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1757
1758 /// Map of scalar integer values to the smallest bitwidth they can be legally
1759 /// represented as. The vector equivalents of these values should be truncated
1760 /// to this type.
1761 MapVector<Instruction *, uint64_t> MinBWs;
1762
1763 /// A type representing the costs for instructions if they were to be
1764 /// scalarized rather than vectorized. The entries are Instruction-Cost
1765 /// pairs.
1766 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1767
1768 /// A set containing all BasicBlocks that are known to be present after
1769 /// vectorization as predicated blocks.
1770 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1771
1772 /// Records whether it is allowed to have the original scalar loop execute at
1773 /// least once. This may be needed as a fallback loop in case runtime
1774 /// aliasing/dependence checks fail, or to handle the tail/remainder
1775 /// iterations when the trip count is unknown or is not a multiple of the VF,
1776 /// or as a peel-loop to handle gaps in interleave-groups.
1777 /// Under optsize and when the trip count is very small we don't allow any
1778 /// iterations to execute in the scalar loop.
1779 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1780
1781 /// All blocks of the loop are to be masked to fold the tail of scalar
1782 /// iterations.
1782 bool FoldTailByMasking = false;
1783
1784 /// A map holding scalar costs for different vectorization factors. The
1785 /// presence of a cost for an instruction in the mapping indicates that the
1786 /// instruction will be scalarized when vectorizing with the associated
1787 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1788 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1789
1790 /// Holds the instructions known to be uniform after vectorization.
1791 /// The data is collected per VF.
1792 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1793
1794 /// Holds the instructions known to be scalar after vectorization.
1795 /// The data is collected per VF.
1796 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1797
1798 /// Holds the instructions (address computations) that are forced to be
1799 /// scalarized.
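/// (Illustrative note: typically these are pointer operands of accesses for
/// which the widening decision ends up as CM_Scalarize; the authoritative
/// source is setCostBasedWideningDecision.)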
1800 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1801 1802 /// PHINodes of the reductions that should be expanded in-loop along with 1803 /// their associated chains of reduction operations, in program order from top 1804 /// (PHI) to bottom 1805 ReductionChainMap InLoopReductionChains; 1806 1807 /// A Map of inloop reduction operations and their immediate chain operand. 1808 /// FIXME: This can be removed once reductions can be costed correctly in 1809 /// vplan. This was added to allow quick lookup to the inloop operations, 1810 /// without having to loop through InLoopReductionChains. 1811 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1812 1813 /// Returns the expected difference in cost from scalarizing the expression 1814 /// feeding a predicated instruction \p PredInst. The instructions to 1815 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1816 /// non-negative return value implies the expression will be scalarized. 1817 /// Currently, only single-use chains are considered for scalarization. 1818 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1819 ElementCount VF); 1820 1821 /// Collect the instructions that are uniform after vectorization. An 1822 /// instruction is uniform if we represent it with a single scalar value in 1823 /// the vectorized loop corresponding to each vector iteration. Examples of 1824 /// uniform instructions include pointer operands of consecutive or 1825 /// interleaved memory accesses. Note that although uniformity implies an 1826 /// instruction will be scalar, the reverse is not true. In general, a 1827 /// scalarized instruction will be represented by VF scalar values in the 1828 /// vectorized loop, each corresponding to an iteration of the original 1829 /// scalar loop. 1830 void collectLoopUniforms(ElementCount VF); 1831 1832 /// Collect the instructions that are scalar after vectorization. An 1833 /// instruction is scalar if it is known to be uniform or will be scalarized 1834 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1835 /// to the list if they are used by a load/store instruction that is marked as 1836 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1837 /// VF values in the vectorized loop, each corresponding to an iteration of 1838 /// the original scalar loop. 1839 void collectLoopScalars(ElementCount VF); 1840 1841 /// Keeps cost model vectorization decision and cost for instructions. 1842 /// Right now it is used for memory instructions only. 1843 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1844 std::pair<InstWidening, InstructionCost>>; 1845 1846 DecisionList WideningDecisions; 1847 1848 /// Returns true if \p V is expected to be vectorized and it needs to be 1849 /// extracted. 1850 bool needsExtract(Value *V, ElementCount VF) const { 1851 Instruction *I = dyn_cast<Instruction>(V); 1852 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1853 TheLoop->isLoopInvariant(I)) 1854 return false; 1855 1856 // Assume we can vectorize V (and hence we need extraction) if the 1857 // scalars are not computed yet. This can happen, because it is called 1858 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1859 // the scalars are collected. That should be a safe assumption in most 1860 // cases, because we check if the operands have vectorizable types 1861 // beforehand in LoopVectorizationLegality. 
1862 return Scalars.find(VF) == Scalars.end() || 1863 !isScalarAfterVectorization(I, VF); 1864 }; 1865 1866 /// Returns a range containing only operands needing to be extracted. 1867 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1868 ElementCount VF) const { 1869 return SmallVector<Value *, 4>(make_filter_range( 1870 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1871 } 1872 1873 /// Determines if we have the infrastructure to vectorize loop \p L and its 1874 /// epilogue, assuming the main loop is vectorized by \p VF. 1875 bool isCandidateForEpilogueVectorization(const Loop &L, 1876 const ElementCount VF) const; 1877 1878 /// Returns true if epilogue vectorization is considered profitable, and 1879 /// false otherwise. 1880 /// \p VF is the vectorization factor chosen for the original loop. 1881 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1882 1883 public: 1884 /// The loop that we evaluate. 1885 Loop *TheLoop; 1886 1887 /// Predicated scalar evolution analysis. 1888 PredicatedScalarEvolution &PSE; 1889 1890 /// Loop Info analysis. 1891 LoopInfo *LI; 1892 1893 /// Vectorization legality. 1894 LoopVectorizationLegality *Legal; 1895 1896 /// Vector target information. 1897 const TargetTransformInfo &TTI; 1898 1899 /// Target Library Info. 1900 const TargetLibraryInfo *TLI; 1901 1902 /// Demanded bits analysis. 1903 DemandedBits *DB; 1904 1905 /// Assumption cache. 1906 AssumptionCache *AC; 1907 1908 /// Interface to emit optimization remarks. 1909 OptimizationRemarkEmitter *ORE; 1910 1911 const Function *TheFunction; 1912 1913 /// Loop Vectorize Hint. 1914 const LoopVectorizeHints *Hints; 1915 1916 /// The interleave access information contains groups of interleaved accesses 1917 /// with the same stride and close to each other. 1918 InterleavedAccessInfo &InterleaveInfo; 1919 1920 /// Values to ignore in the cost model. 1921 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1922 1923 /// Values to ignore in the cost model when VF > 1. 1924 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1925 1926 /// All element types found in the loop. 1927 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1928 1929 /// Profitable vector factors. 1930 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1931 }; 1932 } // end namespace llvm 1933 1934 /// Helper struct to manage generating runtime checks for vectorization. 1935 /// 1936 /// The runtime checks are created up-front in temporary blocks to allow better 1937 /// estimating the cost and un-linked from the existing IR. After deciding to 1938 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1939 /// temporary blocks are completely removed. 1940 class GeneratedRTChecks { 1941 /// Basic block which contains the generated SCEV checks, if any. 1942 BasicBlock *SCEVCheckBlock = nullptr; 1943 1944 /// The value representing the result of the generated SCEV checks. If it is 1945 /// nullptr, either no SCEV checks have been generated or they have been used. 1946 Value *SCEVCheckCond = nullptr; 1947 1948 /// Basic block which contains the generated memory runtime checks, if any. 1949 BasicBlock *MemCheckBlock = nullptr; 1950 1951 /// The value representing the result of the generated memory runtime checks. 1952 /// If it is nullptr, either no memory runtime checks have been generated or 1953 /// they have been used. 
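/// Conceptually, the memory checks compare the address ranges touched by
/// pairs of accesses that may alias; an illustrative shape of one such check
/// (not the exact emitted IR) is:
///
///   overlap = !(A.end <= B.start || B.end <= A.start)
///
/// with this member holding the combined condition over all such pairs.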
1954 Value *MemRuntimeCheckCond = nullptr; 1955 1956 DominatorTree *DT; 1957 LoopInfo *LI; 1958 1959 SCEVExpander SCEVExp; 1960 SCEVExpander MemCheckExp; 1961 1962 public: 1963 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1964 const DataLayout &DL) 1965 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1966 MemCheckExp(SE, DL, "scev.check") {} 1967 1968 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1969 /// accurately estimate the cost of the runtime checks. The blocks are 1970 /// un-linked from the IR and is added back during vector code generation. If 1971 /// there is no vector code generation, the check blocks are removed 1972 /// completely. 1973 void Create(Loop *L, const LoopAccessInfo &LAI, 1974 const SCEVPredicate &Pred) { 1975 1976 BasicBlock *LoopHeader = L->getHeader(); 1977 BasicBlock *Preheader = L->getLoopPreheader(); 1978 1979 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1980 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1981 // may be used by SCEVExpander. The blocks will be un-linked from their 1982 // predecessors and removed from LI & DT at the end of the function. 1983 if (!Pred.isAlwaysTrue()) { 1984 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1985 nullptr, "vector.scevcheck"); 1986 1987 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1988 &Pred, SCEVCheckBlock->getTerminator()); 1989 } 1990 1991 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1992 if (RtPtrChecking.Need) { 1993 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1994 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1995 "vector.memcheck"); 1996 1997 MemRuntimeCheckCond = 1998 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 1999 RtPtrChecking.getChecks(), MemCheckExp); 2000 assert(MemRuntimeCheckCond && 2001 "no RT checks generated although RtPtrChecking " 2002 "claimed checks are required"); 2003 } 2004 2005 if (!MemCheckBlock && !SCEVCheckBlock) 2006 return; 2007 2008 // Unhook the temporary block with the checks, update various places 2009 // accordingly. 2010 if (SCEVCheckBlock) 2011 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2012 if (MemCheckBlock) 2013 MemCheckBlock->replaceAllUsesWith(Preheader); 2014 2015 if (SCEVCheckBlock) { 2016 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2017 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2018 Preheader->getTerminator()->eraseFromParent(); 2019 } 2020 if (MemCheckBlock) { 2021 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2022 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2023 Preheader->getTerminator()->eraseFromParent(); 2024 } 2025 2026 DT->changeImmediateDominator(LoopHeader, Preheader); 2027 if (MemCheckBlock) { 2028 DT->eraseNode(MemCheckBlock); 2029 LI->removeBlock(MemCheckBlock); 2030 } 2031 if (SCEVCheckBlock) { 2032 DT->eraseNode(SCEVCheckBlock); 2033 LI->removeBlock(SCEVCheckBlock); 2034 } 2035 } 2036 2037 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2038 /// unused. 
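/// This acts as the RAII counterpart of Create(): if the caller never "used"
/// the checks (the *CheckCond members are still set), the temporary blocks
/// and any values expanded for them are removed here rather than being left
/// behind in the function.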
2039 ~GeneratedRTChecks() { 2040 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2041 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2042 if (!SCEVCheckCond) 2043 SCEVCleaner.markResultUsed(); 2044 2045 if (!MemRuntimeCheckCond) 2046 MemCheckCleaner.markResultUsed(); 2047 2048 if (MemRuntimeCheckCond) { 2049 auto &SE = *MemCheckExp.getSE(); 2050 // Memory runtime check generation creates compares that use expanded 2051 // values. Remove them before running the SCEVExpanderCleaners. 2052 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2053 if (MemCheckExp.isInsertedInstruction(&I)) 2054 continue; 2055 SE.forgetValue(&I); 2056 I.eraseFromParent(); 2057 } 2058 } 2059 MemCheckCleaner.cleanup(); 2060 SCEVCleaner.cleanup(); 2061 2062 if (SCEVCheckCond) 2063 SCEVCheckBlock->eraseFromParent(); 2064 if (MemRuntimeCheckCond) 2065 MemCheckBlock->eraseFromParent(); 2066 } 2067 2068 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2069 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2070 /// depending on the generated condition. 2071 BasicBlock *emitSCEVChecks(BasicBlock *Bypass, 2072 BasicBlock *LoopVectorPreHeader, 2073 BasicBlock *LoopExitBlock) { 2074 if (!SCEVCheckCond) 2075 return nullptr; 2076 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2077 if (C->isZero()) 2078 return nullptr; 2079 2080 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2081 2082 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2083 // Create new preheader for vector loop. 2084 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2085 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2086 2087 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2088 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2089 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2090 SCEVCheckBlock); 2091 2092 DT->addNewBlock(SCEVCheckBlock, Pred); 2093 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2094 2095 ReplaceInstWithInst( 2096 SCEVCheckBlock->getTerminator(), 2097 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2098 // Mark the check as used, to prevent it from being removed during cleanup. 2099 SCEVCheckCond = nullptr; 2100 return SCEVCheckBlock; 2101 } 2102 2103 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2104 /// the branches to branch to the vector preheader or \p Bypass, depending on 2105 /// the generated condition. 2106 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass, 2107 BasicBlock *LoopVectorPreHeader) { 2108 // Check if we generated code that checks in runtime if arrays overlap. 2109 if (!MemRuntimeCheckCond) 2110 return nullptr; 2111 2112 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2113 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2114 MemCheckBlock); 2115 2116 DT->addNewBlock(MemCheckBlock, Pred); 2117 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2118 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2119 2120 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2121 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2122 2123 ReplaceInstWithInst( 2124 MemCheckBlock->getTerminator(), 2125 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2126 MemCheckBlock->getTerminator()->setDebugLoc( 2127 Pred->getTerminator()->getDebugLoc()); 2128 2129 // Mark the check as used, to prevent it from being removed during cleanup. 
2130 MemRuntimeCheckCond = nullptr; 2131 return MemCheckBlock; 2132 } 2133 }; 2134 2135 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2136 // vectorization. The loop needs to be annotated with #pragma omp simd 2137 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2138 // vector length information is not provided, vectorization is not considered 2139 // explicit. Interleave hints are not allowed either. These limitations will be 2140 // relaxed in the future. 2141 // Please, note that we are currently forced to abuse the pragma 'clang 2142 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2143 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2144 // provides *explicit vectorization hints* (LV can bypass legal checks and 2145 // assume that vectorization is legal). However, both hints are implemented 2146 // using the same metadata (llvm.loop.vectorize, processed by 2147 // LoopVectorizeHints). This will be fixed in the future when the native IR 2148 // representation for pragma 'omp simd' is introduced. 2149 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2150 OptimizationRemarkEmitter *ORE) { 2151 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2152 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2153 2154 // Only outer loops with an explicit vectorization hint are supported. 2155 // Unannotated outer loops are ignored. 2156 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2157 return false; 2158 2159 Function *Fn = OuterLp->getHeader()->getParent(); 2160 if (!Hints.allowVectorization(Fn, OuterLp, 2161 true /*VectorizeOnlyWhenForced*/)) { 2162 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2163 return false; 2164 } 2165 2166 if (Hints.getInterleave() > 1) { 2167 // TODO: Interleave support is future work. 2168 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2169 "outer loops.\n"); 2170 Hints.emitRemarkWithHints(); 2171 return false; 2172 } 2173 2174 return true; 2175 } 2176 2177 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2178 OptimizationRemarkEmitter *ORE, 2179 SmallVectorImpl<Loop *> &V) { 2180 // Collect inner loops and outer loops without irreducible control flow. For 2181 // now, only collect outer loops that have explicit vectorization hints. If we 2182 // are stress testing the VPlan H-CFG construction, we collect the outermost 2183 // loop of every loop nest. 2184 if (L.isInnermost() || VPlanBuildStressTest || 2185 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2186 LoopBlocksRPO RPOT(&L); 2187 RPOT.perform(LI); 2188 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2189 V.push_back(&L); 2190 // TODO: Collect inner loops inside marked outer loops in case 2191 // vectorization fails for the outer loop. Do not invoke 2192 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2193 // already known to be reducible. We can use an inherited attribute for 2194 // that. 2195 return; 2196 } 2197 } 2198 for (Loop *InnerL : L) 2199 collectSupportedLoops(*InnerL, LI, ORE, V); 2200 } 2201 2202 namespace { 2203 2204 /// The LoopVectorize Pass. 
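/// This is the legacy pass-manager wrapper (registered as "loop-vectorize"):
/// it only collects the required analyses and forwards the actual work to the
/// pass-manager-independent LoopVectorizePass held in Impl.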
2205 struct LoopVectorize : public FunctionPass { 2206 /// Pass identification, replacement for typeid 2207 static char ID; 2208 2209 LoopVectorizePass Impl; 2210 2211 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2212 bool VectorizeOnlyWhenForced = false) 2213 : FunctionPass(ID), 2214 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2215 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2216 } 2217 2218 bool runOnFunction(Function &F) override { 2219 if (skipFunction(F)) 2220 return false; 2221 2222 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2223 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2224 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2225 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2226 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2227 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2228 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2229 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2230 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2231 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2232 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2233 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2234 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2235 2236 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2237 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2238 2239 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2240 GetLAA, *ORE, PSI).MadeAnyChange; 2241 } 2242 2243 void getAnalysisUsage(AnalysisUsage &AU) const override { 2244 AU.addRequired<AssumptionCacheTracker>(); 2245 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2246 AU.addRequired<DominatorTreeWrapperPass>(); 2247 AU.addRequired<LoopInfoWrapperPass>(); 2248 AU.addRequired<ScalarEvolutionWrapperPass>(); 2249 AU.addRequired<TargetTransformInfoWrapperPass>(); 2250 AU.addRequired<AAResultsWrapperPass>(); 2251 AU.addRequired<LoopAccessLegacyAnalysis>(); 2252 AU.addRequired<DemandedBitsWrapperPass>(); 2253 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2254 AU.addRequired<InjectTLIMappingsLegacy>(); 2255 2256 // We currently do not preserve loopinfo/dominator analyses with outer loop 2257 // vectorization. Until this is addressed, mark these analyses as preserved 2258 // only for non-VPlan-native path. 2259 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2260 if (!EnableVPlanNativePath) { 2261 AU.addPreserved<LoopInfoWrapperPass>(); 2262 AU.addPreserved<DominatorTreeWrapperPass>(); 2263 } 2264 2265 AU.addPreserved<BasicAAWrapperPass>(); 2266 AU.addPreserved<GlobalsAAWrapperPass>(); 2267 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2268 } 2269 }; 2270 2271 } // end anonymous namespace 2272 2273 //===----------------------------------------------------------------------===// 2274 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2275 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2276 //===----------------------------------------------------------------------===// 2277 2278 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2279 // We need to place the broadcast of invariant variables outside the loop, 2280 // but only if it's proven safe to do so. Else, broadcast will be inside 2281 // vector loop body. 
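// The splat created below typically expands to IR of roughly this shape
// (sketch for a fixed VF of 4; the exact instructions are whatever
// CreateVectorSplat emits):
//
//   %splatinsert = insertelement <4 x i32> poison, i32 %v, i32 0
//   %broadcast   = shufflevector <4 x i32> %splatinsert, <4 x i32> poison,
//                                <4 x i32> zeroinitializer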
2282 Instruction *Instr = dyn_cast<Instruction>(V); 2283 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2284 (!Instr || 2285 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2286 // Place the code for broadcasting invariant variables in the new preheader. 2287 IRBuilder<>::InsertPointGuard Guard(Builder); 2288 if (SafeToHoist) 2289 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2290 2291 // Broadcast the scalar into all locations in the vector. 2292 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2293 2294 return Shuf; 2295 } 2296 2297 /// This function adds 2298 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2299 /// to each vector element of Val. The sequence starts at StartIndex. 2300 /// \p Opcode is relevant for FP induction variable. 2301 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2302 Instruction::BinaryOps BinOp, ElementCount VF, 2303 IRBuilderBase &Builder) { 2304 assert(VF.isVector() && "only vector VFs are supported"); 2305 2306 // Create and check the types. 2307 auto *ValVTy = cast<VectorType>(Val->getType()); 2308 ElementCount VLen = ValVTy->getElementCount(); 2309 2310 Type *STy = Val->getType()->getScalarType(); 2311 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2312 "Induction Step must be an integer or FP"); 2313 assert(Step->getType() == STy && "Step has wrong type"); 2314 2315 SmallVector<Constant *, 8> Indices; 2316 2317 // Create a vector of consecutive numbers from zero to VF. 2318 VectorType *InitVecValVTy = ValVTy; 2319 if (STy->isFloatingPointTy()) { 2320 Type *InitVecValSTy = 2321 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2322 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2323 } 2324 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2325 2326 // Splat the StartIdx 2327 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2328 2329 if (STy->isIntegerTy()) { 2330 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2331 Step = Builder.CreateVectorSplat(VLen, Step); 2332 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2333 // FIXME: The newly created binary instructions should contain nsw/nuw 2334 // flags, which can be found from the original scalar operations. 2335 Step = Builder.CreateMul(InitVec, Step); 2336 return Builder.CreateAdd(Val, Step, "induction"); 2337 } 2338 2339 // Floating point induction. 2340 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2341 "Binary Opcode should be specified for FP induction"); 2342 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2343 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2344 2345 Step = Builder.CreateVectorSplat(VLen, Step); 2346 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2347 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2348 } 2349 2350 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2351 /// variable on which to base the steps, \p Step is the size of the step. 2352 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2353 const InductionDescriptor &ID, VPValue *Def, 2354 VPTransformState &State) { 2355 IRBuilderBase &Builder = State.Builder; 2356 // We shouldn't have to build scalar steps if we aren't vectorizing. 2357 assert(State.VF.isVector() && "VF should be greater than one"); 2358 // Get the value type and ensure it and the step have the same integer type. 
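// (Illustrative summary of what the per-part loop below produces in the
// fixed-width case: for unroll part P and lane L the value is
//   ScalarIV + (P * VF + L) * Step
// using the add/mul opcodes selected further down.)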
2359 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2360 assert(ScalarIVTy == Step->getType() && 2361 "Val and Step should have the same type"); 2362 2363 // We build scalar steps for both integer and floating-point induction 2364 // variables. Here, we determine the kind of arithmetic we will perform. 2365 Instruction::BinaryOps AddOp; 2366 Instruction::BinaryOps MulOp; 2367 if (ScalarIVTy->isIntegerTy()) { 2368 AddOp = Instruction::Add; 2369 MulOp = Instruction::Mul; 2370 } else { 2371 AddOp = ID.getInductionOpcode(); 2372 MulOp = Instruction::FMul; 2373 } 2374 2375 // Determine the number of scalars we need to generate for each unroll 2376 // iteration. 2377 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2378 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2379 // Compute the scalar steps and save the results in State. 2380 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2381 ScalarIVTy->getScalarSizeInBits()); 2382 Type *VecIVTy = nullptr; 2383 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2384 if (!FirstLaneOnly && State.VF.isScalable()) { 2385 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2386 UnitStepVec = 2387 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2388 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2389 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2390 } 2391 2392 for (unsigned Part = 0; Part < State.UF; ++Part) { 2393 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2394 2395 if (!FirstLaneOnly && State.VF.isScalable()) { 2396 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2397 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2398 if (ScalarIVTy->isFloatingPointTy()) 2399 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2400 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2401 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2402 State.set(Def, Add, Part); 2403 // It's useful to record the lane values too for the known minimum number 2404 // of elements so we do those below. This improves the code quality when 2405 // trying to extract the first element, for example. 2406 } 2407 2408 if (ScalarIVTy->isFloatingPointTy()) 2409 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2410 2411 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2412 Value *StartIdx = Builder.CreateBinOp( 2413 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2414 // The step returned by `createStepForVF` is a runtime-evaluated value 2415 // when VF is scalable. Otherwise, it should be folded into a Constant. 2416 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2417 "Expected StartIdx to be folded to a constant when VF is not " 2418 "scalable"); 2419 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2420 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2421 State.set(Def, Add, VPIteration(Part, Lane)); 2422 } 2423 } 2424 } 2425 2426 // Generate code for the induction step. 
Note that induction steps are 2427 // required to be loop-invariant 2428 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE, 2429 Instruction *InsertBefore, 2430 Loop *OrigLoop = nullptr) { 2431 const DataLayout &DL = SE.getDataLayout(); 2432 assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) && 2433 "Induction step should be loop invariant"); 2434 if (auto *E = dyn_cast<SCEVUnknown>(Step)) 2435 return E->getValue(); 2436 2437 SCEVExpander Exp(SE, DL, "induction"); 2438 return Exp.expandCodeFor(Step, Step->getType(), InsertBefore); 2439 } 2440 2441 /// Compute the transformed value of Index at offset StartValue using step 2442 /// StepValue. 2443 /// For integer induction, returns StartValue + Index * StepValue. 2444 /// For pointer induction, returns StartValue[Index * StepValue]. 2445 /// FIXME: The newly created binary instructions should contain nsw/nuw 2446 /// flags, which can be found from the original scalar operations. 2447 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index, 2448 Value *StartValue, Value *Step, 2449 const InductionDescriptor &ID) { 2450 assert(Index->getType()->getScalarType() == Step->getType() && 2451 "Index scalar type does not match StepValue type"); 2452 2453 // Note: the IR at this point is broken. We cannot use SE to create any new 2454 // SCEV and then expand it, hoping that SCEV's simplification will give us 2455 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2456 // lead to various SCEV crashes. So all we can do is to use builder and rely 2457 // on InstCombine for future simplifications. Here we handle some trivial 2458 // cases only. 2459 auto CreateAdd = [&B](Value *X, Value *Y) { 2460 assert(X->getType() == Y->getType() && "Types don't match!"); 2461 if (auto *CX = dyn_cast<ConstantInt>(X)) 2462 if (CX->isZero()) 2463 return Y; 2464 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2465 if (CY->isZero()) 2466 return X; 2467 return B.CreateAdd(X, Y); 2468 }; 2469 2470 // We allow X to be a vector type, in which case Y will potentially be 2471 // splatted into a vector with the same element count. 
2472 auto CreateMul = [&B](Value *X, Value *Y) { 2473 assert(X->getType()->getScalarType() == Y->getType() && 2474 "Types don't match!"); 2475 if (auto *CX = dyn_cast<ConstantInt>(X)) 2476 if (CX->isOne()) 2477 return Y; 2478 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2479 if (CY->isOne()) 2480 return X; 2481 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2482 if (XVTy && !isa<VectorType>(Y->getType())) 2483 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2484 return B.CreateMul(X, Y); 2485 }; 2486 2487 switch (ID.getKind()) { 2488 case InductionDescriptor::IK_IntInduction: { 2489 assert(!isa<VectorType>(Index->getType()) && 2490 "Vector indices not supported for integer inductions yet"); 2491 assert(Index->getType() == StartValue->getType() && 2492 "Index type does not match StartValue type"); 2493 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2494 return B.CreateSub(StartValue, Index); 2495 auto *Offset = CreateMul(Index, Step); 2496 return CreateAdd(StartValue, Offset); 2497 } 2498 case InductionDescriptor::IK_PtrInduction: { 2499 assert(isa<Constant>(Step) && 2500 "Expected constant step for pointer induction"); 2501 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2502 } 2503 case InductionDescriptor::IK_FpInduction: { 2504 assert(!isa<VectorType>(Index->getType()) && 2505 "Vector indices not supported for FP inductions yet"); 2506 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2507 auto InductionBinOp = ID.getInductionBinOp(); 2508 assert(InductionBinOp && 2509 (InductionBinOp->getOpcode() == Instruction::FAdd || 2510 InductionBinOp->getOpcode() == Instruction::FSub) && 2511 "Original bin op should be defined for FP induction"); 2512 2513 Value *MulExp = B.CreateFMul(Step, Index); 2514 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2515 "induction"); 2516 } 2517 case InductionDescriptor::IK_NoInduction: 2518 return nullptr; 2519 } 2520 llvm_unreachable("invalid enum"); 2521 } 2522 2523 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2524 const VPIteration &Instance, 2525 VPTransformState &State) { 2526 Value *ScalarInst = State.get(Def, Instance); 2527 Value *VectorValue = State.get(Def, Instance.Part); 2528 VectorValue = Builder.CreateInsertElement( 2529 VectorValue, ScalarInst, 2530 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2531 State.set(Def, VectorValue, Instance.Part); 2532 } 2533 2534 // Return whether we allow using masked interleave-groups (for dealing with 2535 // strided loads/stores that reside in predicated blocks, or for dealing 2536 // with gaps). 2537 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2538 // If an override option has been passed in for interleaved accesses, use it. 2539 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2540 return EnableMaskedInterleavedMemAccesses; 2541 2542 return TTI.enableMaskedInterleavedAccessVectorization(); 2543 } 2544 2545 // Try to vectorize the interleave group that \p Instr belongs to. 2546 // 2547 // E.g. Translate following interleaved load group (factor = 3): 2548 // for (i = 0; i < N; i+=3) { 2549 // R = Pic[i]; // Member of index 0 2550 // G = Pic[i+1]; // Member of index 1 2551 // B = Pic[i+2]; // Member of index 2 2552 // ... 
// do something to R, G, B 2553 // } 2554 // To: 2555 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2556 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2557 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2558 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2559 // 2560 // Or translate following interleaved store group (factor = 3): 2561 // for (i = 0; i < N; i+=3) { 2562 // ... do something to R, G, B 2563 // Pic[i] = R; // Member of index 0 2564 // Pic[i+1] = G; // Member of index 1 2565 // Pic[i+2] = B; // Member of index 2 2566 // } 2567 // To: 2568 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2569 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2570 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2571 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2572 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2573 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2574 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2575 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2576 VPValue *BlockInMask) { 2577 Instruction *Instr = Group->getInsertPos(); 2578 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2579 2580 // Prepare for the vector type of the interleaved load/store. 2581 Type *ScalarTy = getLoadStoreType(Instr); 2582 unsigned InterleaveFactor = Group->getFactor(); 2583 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2584 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2585 2586 // Prepare for the new pointers. 2587 SmallVector<Value *, 2> AddrParts; 2588 unsigned Index = Group->getIndex(Instr); 2589 2590 // TODO: extend the masked interleaved-group support to reversed access. 2591 assert((!BlockInMask || !Group->isReverse()) && 2592 "Reversed masked interleave-group not supported."); 2593 2594 // If the group is reverse, adjust the index to refer to the last vector lane 2595 // instead of the first. We adjust the index from the first vector lane, 2596 // rather than directly getting the pointer for lane VF - 1, because the 2597 // pointer operand of the interleaved access is supposed to be uniform. For 2598 // uniform instructions, we're only required to generate a value for the 2599 // first vector lane in each unroll iteration. 2600 if (Group->isReverse()) 2601 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2602 2603 for (unsigned Part = 0; Part < UF; Part++) { 2604 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2605 setDebugLocFromInst(AddrPart); 2606 2607 // Notice current instruction could be any index. Need to adjust the address 2608 // to the member of index 0. 2609 // 2610 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2611 // b = A[i]; // Member of index 0 2612 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2613 // 2614 // E.g. A[i+1] = a; // Member of index 1 2615 // A[i] = b; // Member of index 0 2616 // A[i+2] = c; // Member of index 2 (Current instruction) 2617 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2618 2619 bool InBounds = false; 2620 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2621 InBounds = gep->isInBounds(); 2622 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2623 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2624 2625 // Cast to the vector pointer type. 
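// (For the running R,G,B example above, with VF = 4 and interleave factor 3,
// this is a pointer to <12 x i32>.)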
2626 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2627 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2628 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2629 } 2630 2631 setDebugLocFromInst(Instr); 2632 Value *PoisonVec = PoisonValue::get(VecTy); 2633 2634 Value *MaskForGaps = nullptr; 2635 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2636 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2637 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2638 } 2639 2640 // Vectorize the interleaved load group. 2641 if (isa<LoadInst>(Instr)) { 2642 // For each unroll part, create a wide load for the group. 2643 SmallVector<Value *, 2> NewLoads; 2644 for (unsigned Part = 0; Part < UF; Part++) { 2645 Instruction *NewLoad; 2646 if (BlockInMask || MaskForGaps) { 2647 assert(useMaskedInterleavedAccesses(*TTI) && 2648 "masked interleaved groups are not allowed."); 2649 Value *GroupMask = MaskForGaps; 2650 if (BlockInMask) { 2651 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2652 Value *ShuffledMask = Builder.CreateShuffleVector( 2653 BlockInMaskPart, 2654 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2655 "interleaved.mask"); 2656 GroupMask = MaskForGaps 2657 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2658 MaskForGaps) 2659 : ShuffledMask; 2660 } 2661 NewLoad = 2662 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2663 GroupMask, PoisonVec, "wide.masked.vec"); 2664 } 2665 else 2666 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2667 Group->getAlign(), "wide.vec"); 2668 Group->addMetadata(NewLoad); 2669 NewLoads.push_back(NewLoad); 2670 } 2671 2672 // For each member in the group, shuffle out the appropriate data from the 2673 // wide loads. 2674 unsigned J = 0; 2675 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2676 Instruction *Member = Group->getMember(I); 2677 2678 // Skip the gaps in the group. 2679 if (!Member) 2680 continue; 2681 2682 auto StrideMask = 2683 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2684 for (unsigned Part = 0; Part < UF; Part++) { 2685 Value *StridedVec = Builder.CreateShuffleVector( 2686 NewLoads[Part], StrideMask, "strided.vec"); 2687 2688 // If this member has different type, cast the result type. 2689 if (Member->getType() != ScalarTy) { 2690 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2691 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2692 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2693 } 2694 2695 if (Group->isReverse()) 2696 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2697 2698 State.set(VPDefs[J], StridedVec, Part); 2699 } 2700 ++J; 2701 } 2702 return; 2703 } 2704 2705 // The sub vector type for current instruction. 2706 auto *SubVT = VectorType::get(ScalarTy, VF); 2707 2708 // Vectorize the interleaved store group. 2709 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2710 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2711 "masked interleaved groups are not allowed."); 2712 assert((!MaskForGaps || !VF.isScalable()) && 2713 "masking gaps for scalable vectors is not yet supported."); 2714 for (unsigned Part = 0; Part < UF; Part++) { 2715 // Collect the stored vector from each member. 
2716 SmallVector<Value *, 4> StoredVecs; 2717 for (unsigned i = 0; i < InterleaveFactor; i++) { 2718 assert((Group->getMember(i) || MaskForGaps) && 2719 "Fail to get a member from an interleaved store group"); 2720 Instruction *Member = Group->getMember(i); 2721 2722 // Skip the gaps in the group. 2723 if (!Member) { 2724 Value *Undef = PoisonValue::get(SubVT); 2725 StoredVecs.push_back(Undef); 2726 continue; 2727 } 2728 2729 Value *StoredVec = State.get(StoredValues[i], Part); 2730 2731 if (Group->isReverse()) 2732 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2733 2734 // If this member has different type, cast it to a unified type. 2735 2736 if (StoredVec->getType() != SubVT) 2737 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2738 2739 StoredVecs.push_back(StoredVec); 2740 } 2741 2742 // Concatenate all vectors into a wide vector. 2743 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2744 2745 // Interleave the elements in the wide vector. 2746 Value *IVec = Builder.CreateShuffleVector( 2747 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2748 "interleaved.vec"); 2749 2750 Instruction *NewStoreInstr; 2751 if (BlockInMask || MaskForGaps) { 2752 Value *GroupMask = MaskForGaps; 2753 if (BlockInMask) { 2754 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2755 Value *ShuffledMask = Builder.CreateShuffleVector( 2756 BlockInMaskPart, 2757 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2758 "interleaved.mask"); 2759 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2760 ShuffledMask, MaskForGaps) 2761 : ShuffledMask; 2762 } 2763 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2764 Group->getAlign(), GroupMask); 2765 } else 2766 NewStoreInstr = 2767 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2768 2769 Group->addMetadata(NewStoreInstr); 2770 } 2771 } 2772 2773 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2774 VPReplicateRecipe *RepRecipe, 2775 const VPIteration &Instance, 2776 bool IfPredicateInstr, 2777 VPTransformState &State) { 2778 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2779 2780 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2781 // the first lane and part. 2782 if (isa<NoAliasScopeDeclInst>(Instr)) 2783 if (!Instance.isFirstIteration()) 2784 return; 2785 2786 // Does this instruction return a value ? 2787 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2788 2789 Instruction *Cloned = Instr->clone(); 2790 if (!IsVoidRetTy) 2791 Cloned->setName(Instr->getName() + ".cloned"); 2792 2793 // If the scalarized instruction contributes to the address computation of a 2794 // widen masked load/store which was in a basic block that needed predication 2795 // and is not predicated after vectorization, we can't propagate 2796 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2797 // instruction could feed a poison value to the base address of the widen 2798 // load/store. 2799 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2800 Cloned->dropPoisonGeneratingFlags(); 2801 2802 if (Instr->getDebugLoc()) 2803 setDebugLocFromInst(Instr); 2804 2805 // Replace the operands of the cloned instructions with their scalar 2806 // equivalents in the new loop. 
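// Note: uniform operands only have a scalar value for lane 0, so the loop
// below redirects such operands to lane 0 of the requested unroll part.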
2807 for (auto &I : enumerate(RepRecipe->operands())) { 2808 auto InputInstance = Instance; 2809 VPValue *Operand = I.value(); 2810 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2811 if (OperandR && OperandR->isUniform()) 2812 InputInstance.Lane = VPLane::getFirstLane(); 2813 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2814 } 2815 addNewMetadata(Cloned, Instr); 2816 2817 // Place the cloned scalar in the new loop. 2818 State.Builder.Insert(Cloned); 2819 2820 State.set(RepRecipe, Cloned, Instance); 2821 2822 // If we just cloned a new assumption, add it the assumption cache. 2823 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2824 AC->registerAssumption(II); 2825 2826 // End if-block. 2827 if (IfPredicateInstr) 2828 PredicatedInstructions.push_back(Cloned); 2829 } 2830 2831 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2832 if (TripCount) 2833 return TripCount; 2834 2835 assert(InsertBlock); 2836 IRBuilder<> Builder(InsertBlock->getTerminator()); 2837 // Find the loop boundaries. 2838 ScalarEvolution *SE = PSE.getSE(); 2839 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2840 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2841 "Invalid loop count"); 2842 2843 Type *IdxTy = Legal->getWidestInductionType(); 2844 assert(IdxTy && "No type for induction"); 2845 2846 // The exit count might have the type of i64 while the phi is i32. This can 2847 // happen if we have an induction variable that is sign extended before the 2848 // compare. The only way that we get a backedge taken count is that the 2849 // induction variable was signed and as such will not overflow. In such a case 2850 // truncation is legal. 2851 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2852 IdxTy->getPrimitiveSizeInBits()) 2853 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2854 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2855 2856 // Get the total trip count from the count by adding 1. 2857 const SCEV *ExitCount = SE->getAddExpr( 2858 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2859 2860 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2861 2862 // Expand the trip count and place the new instructions in the preheader. 2863 // Notice that the pre-header does not change, only the loop body. 2864 SCEVExpander Exp(*SE, DL, "induction"); 2865 2866 // Count holds the overall loop count (N). 2867 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2868 InsertBlock->getTerminator()); 2869 2870 if (TripCount->getType()->isPointerTy()) 2871 TripCount = 2872 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2873 InsertBlock->getTerminator()); 2874 2875 return TripCount; 2876 } 2877 2878 Value * 2879 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2880 if (VectorTripCount) 2881 return VectorTripCount; 2882 2883 Value *TC = getOrCreateTripCount(InsertBlock); 2884 IRBuilder<> Builder(InsertBlock->getTerminator()); 2885 2886 Type *Ty = TC->getType(); 2887 // This is where we can make the step a runtime constant. 2888 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2889 2890 // If the tail is to be folded by masking, round the number of iterations N 2891 // up to a multiple of Step instead of rounding down. This is done by first 2892 // adding Step-1 and then rounding down. 
Note that it's ok if this addition 2893 // overflows: the vector induction variable will eventually wrap to zero given 2894 // that it starts at zero and its Step is a power of two; the loop will then 2895 // exit, with the last early-exit vector comparison also producing all-true. 2896 if (Cost->foldTailByMasking()) { 2897 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2898 "VF*UF must be a power of 2 when folding tail by masking"); 2899 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2900 TC = Builder.CreateAdd( 2901 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2902 } 2903 2904 // Now we need to generate the expression for the part of the loop that the 2905 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2906 // iterations are not required for correctness, or N - Step, otherwise. Step 2907 // is equal to the vectorization factor (number of SIMD elements) times the 2908 // unroll factor (number of SIMD instructions). 2909 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2910 2911 // There are cases where we *must* run at least one iteration in the remainder 2912 // loop. See the cost model for when this can happen. If the step evenly 2913 // divides the trip count, we set the remainder to be equal to the step. If 2914 // the step does not evenly divide the trip count, no adjustment is necessary 2915 // since there will already be scalar iterations. Note that the minimum 2916 // iterations check ensures that N >= Step. 2917 if (Cost->requiresScalarEpilogue(VF)) { 2918 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2919 R = Builder.CreateSelect(IsZero, Step, R); 2920 } 2921 2922 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2923 2924 return VectorTripCount; 2925 } 2926 2927 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2928 const DataLayout &DL) { 2929 // Verify that V is a vector type with same number of elements as DstVTy. 2930 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2931 unsigned VF = DstFVTy->getNumElements(); 2932 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2933 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2934 Type *SrcElemTy = SrcVecTy->getElementType(); 2935 Type *DstElemTy = DstFVTy->getElementType(); 2936 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2937 "Vector elements must have same size"); 2938 2939 // Do a direct cast if element types are castable. 2940 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2941 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2942 } 2943 // V cannot be directly casted to desired vector type. 2944 // May happen when V is a floating point vector but DstVTy is a vector of 2945 // pointers or vice-versa. Handle this using a two-step bitcast using an 2946 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
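// Illustrative (assuming 64-bit pointers): casting <4 x double> to a vector
// of pointers goes <4 x double> -> bitcast -> <4 x i64> -> inttoptr ->
// <4 x i8*>, with the integer vector acting as the intermediate type.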
2947 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2948 "Only one type should be a pointer type"); 2949 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2950 "Only one type should be a floating point type"); 2951 Type *IntTy = 2952 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2953 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2954 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2955 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2956 } 2957 2958 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) { 2959 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2960 // Reuse existing vector loop preheader for TC checks. 2961 // Note that new preheader block is generated for vector loop. 2962 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2963 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2964 2965 // Generate code to check if the loop's trip count is less than VF * UF, or 2966 // equal to it in case a scalar epilogue is required; this implies that the 2967 // vector trip count is zero. This check also covers the case where adding one 2968 // to the backedge-taken count overflowed leading to an incorrect trip count 2969 // of zero. In this case we will also jump to the scalar loop. 2970 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2971 : ICmpInst::ICMP_ULT; 2972 2973 // If tail is to be folded, vector loop takes care of all iterations. 2974 Value *CheckMinIters = Builder.getFalse(); 2975 if (!Cost->foldTailByMasking()) { 2976 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 2977 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 2978 } 2979 // Create new preheader for vector loop. 2980 LoopVectorPreHeader = 2981 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2982 "vector.ph"); 2983 2984 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2985 DT->getNode(Bypass)->getIDom()) && 2986 "TC check is expected to dominate Bypass"); 2987 2988 // Update dominator for Bypass & LoopExit (if needed). 2989 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2990 if (!Cost->requiresScalarEpilogue(VF)) 2991 // If there is an epilogue which must run, there's no edge from the 2992 // middle block to exit blocks and thus no need to update the immediate 2993 // dominator of the exit blocks. 2994 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2995 2996 ReplaceInstWithInst( 2997 TCCheckBlock->getTerminator(), 2998 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2999 LoopBypassBlocks.push_back(TCCheckBlock); 3000 } 3001 3002 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3003 3004 BasicBlock *const SCEVCheckBlock = 3005 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3006 if (!SCEVCheckBlock) 3007 return nullptr; 3008 3009 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3010 (OptForSizeBasedOnProfile && 3011 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3012 "Cannot SCEV check stride or overflow when optimizing for size"); 3013 3014 3015 // Update dominator only if this is first RT check. 
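// (If an earlier bypass block, such as the minimum-iteration-count check, has
// already been emitted, it already dominates Bypass and no update is needed.)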
3016 if (LoopBypassBlocks.empty()) { 3017 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3018 if (!Cost->requiresScalarEpilogue(VF)) 3019 // If there is an epilogue which must run, there's no edge from the 3020 // middle block to exit blocks and thus no need to update the immediate 3021 // dominator of the exit blocks. 3022 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3023 } 3024 3025 LoopBypassBlocks.push_back(SCEVCheckBlock); 3026 AddedSafetyChecks = true; 3027 return SCEVCheckBlock; 3028 } 3029 3030 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) { 3031 // VPlan-native path does not do any analysis for runtime checks currently. 3032 if (EnableVPlanNativePath) 3033 return nullptr; 3034 3035 BasicBlock *const MemCheckBlock = 3036 RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader); 3037 3038 // Check if we generated code that checks in runtime if arrays overlap. We put 3039 // the checks into a separate block to make the more common case of few 3040 // elements faster. 3041 if (!MemCheckBlock) 3042 return nullptr; 3043 3044 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3045 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3046 "Cannot emit memory checks when optimizing for size, unless forced " 3047 "to vectorize."); 3048 ORE->emit([&]() { 3049 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3050 OrigLoop->getStartLoc(), 3051 OrigLoop->getHeader()) 3052 << "Code-size may be reduced by not forcing " 3053 "vectorization, or by source-code modifications " 3054 "eliminating the need for runtime checks " 3055 "(e.g., adding 'restrict')."; 3056 }); 3057 } 3058 3059 LoopBypassBlocks.push_back(MemCheckBlock); 3060 3061 AddedSafetyChecks = true; 3062 3063 // We currently don't use LoopVersioning for the actual loop cloning but we 3064 // still use it to add the noalias metadata. 3065 LVer = std::make_unique<LoopVersioning>( 3066 *Legal->getLAI(), 3067 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3068 DT, PSE.getSE()); 3069 LVer->prepareNoAliasMetadata(); 3070 return MemCheckBlock; 3071 } 3072 3073 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3074 LoopScalarBody = OrigLoop->getHeader(); 3075 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3076 assert(LoopVectorPreHeader && "Invalid loop structure"); 3077 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3078 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3079 "multiple exit loop without required epilogue?"); 3080 3081 LoopMiddleBlock = 3082 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3083 LI, nullptr, Twine(Prefix) + "middle.block"); 3084 LoopScalarPreHeader = 3085 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3086 nullptr, Twine(Prefix) + "scalar.ph"); 3087 3088 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3089 3090 // Set up the middle block terminator. Two cases: 3091 // 1) If we know that we must execute the scalar epilogue, emit an 3092 // unconditional branch. 3093 // 2) Otherwise, we must have a single unique exit block (due to how we 3094 // implement the multiple exit case). In this case, set up a conditonal 3095 // branch from the middle block to the loop scalar preheader, and the 3096 // exit block. completeLoopSkeleton will update the condition to use an 3097 // iteration check, if required to decide whether to execute the remainder. 
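// In case 2 the branch is created with a placeholder 'true' condition (always
// take the exit edge); completeLoopSkeleton later replaces it with the
// 'cmp.n' trip-count check unless the tail is folded by masking, in which
// case the vector loop covers all iterations and the condition stays 'true'.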
3098 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3099 BranchInst::Create(LoopScalarPreHeader) :
3100 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3101 Builder.getTrue());
3102 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3103 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3104
3105 // Update dominator for loop exit. During skeleton creation, only the vector
3106 // pre-header and the middle block are created. The vector loop is entirely
3107 // created during VPlan execution.
3108 if (!Cost->requiresScalarEpilogue(VF))
3109 // If there is an epilogue which must run, there's no edge from the
3110 // middle block to exit blocks and thus no need to update the immediate
3111 // dominator of the exit blocks.
3112 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3113 }
3114
3115 void InnerLoopVectorizer::createInductionResumeValues(
3116 std::pair<BasicBlock *, Value *> AdditionalBypass) {
3117 assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3118 (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3119 "Inconsistent information about additional bypass.");
3120
3121 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3122 assert(VectorTripCount && "Expected valid arguments");
3123 // We are going to resume the execution of the scalar loop.
3124 // Go over all of the induction variables that we found and fix the
3125 // PHIs that are left in the scalar version of the loop.
3126 // The starting values of PHI nodes depend on the counter of the last
3127 // iteration in the vectorized loop.
3128 // If we come from a bypass edge then we need to start from the original
3129 // start value.
3130 Instruction *OldInduction = Legal->getPrimaryInduction();
3131 for (auto &InductionEntry : Legal->getInductionVars()) {
3132 PHINode *OrigPhi = InductionEntry.first;
3133 InductionDescriptor II = InductionEntry.second;
3134
3135 // Create phi nodes to merge from the backedge-taken check block.
3136 PHINode *BCResumeVal =
3137 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3138 LoopScalarPreHeader->getTerminator());
3139 // Copy original phi DL over to the new one.
3140 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3141 Value *&EndValue = IVEndValues[OrigPhi];
3142 Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3143 if (OrigPhi == OldInduction) {
3144 // We know what the end value is.
3145 EndValue = VectorTripCount;
3146 } else {
3147 IRBuilder<> B(LoopVectorPreHeader->getTerminator());
3148
3149 // Fast-math-flags propagate from the original induction instruction.
3150 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3151 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3152
3153 Type *StepType = II.getStep()->getType();
3154 Instruction::CastOps CastOp =
3155 CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3156 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3157 Value *Step =
3158 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3159 EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3160 EndValue->setName("ind.end");
3161
3162 // Compute the end value for the additional bypass (if applicable).
3163 if (AdditionalBypass.first) { 3164 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3165 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3166 StepType, true); 3167 Value *Step = 3168 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3169 CRD = 3170 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3171 EndValueFromAdditionalBypass = 3172 emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3173 EndValueFromAdditionalBypass->setName("ind.end"); 3174 } 3175 } 3176 // The new PHI merges the original incoming value, in case of a bypass, 3177 // or the value at the end of the vectorized loop. 3178 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3179 3180 // Fix the scalar body counter (PHI node). 3181 // The old induction's phi node in the scalar body needs the truncated 3182 // value. 3183 for (BasicBlock *BB : LoopBypassBlocks) 3184 BCResumeVal->addIncoming(II.getStartValue(), BB); 3185 3186 if (AdditionalBypass.first) 3187 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3188 EndValueFromAdditionalBypass); 3189 3190 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3191 } 3192 } 3193 3194 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3195 // The trip counts should be cached by now. 3196 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3197 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3198 3199 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3200 3201 // Add a check in the middle block to see if we have completed 3202 // all of the iterations in the first vector loop. Three cases: 3203 // 1) If we require a scalar epilogue, there is no conditional branch as 3204 // we unconditionally branch to the scalar preheader. Do nothing. 3205 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3206 // Thus if tail is to be folded, we know we don't need to run the 3207 // remainder and we can use the previous value for the condition (true). 3208 // 3) Otherwise, construct a runtime check. 3209 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3210 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3211 Count, VectorTripCount, "cmp.n", 3212 LoopMiddleBlock->getTerminator()); 3213 3214 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3215 // of the corresponding compare because they may have ended up with 3216 // different line numbers and we want to avoid awkward line stepping while 3217 // debugging. Eg. if the compare has got a line number inside the loop. 3218 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3219 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3220 } 3221 3222 #ifdef EXPENSIVE_CHECKS 3223 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3224 #endif 3225 3226 return LoopVectorPreHeader; 3227 } 3228 3229 std::pair<BasicBlock *, Value *> 3230 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3231 /* 3232 In this function we generate a new loop. The new loop will contain 3233 the vectorized instructions while the old loop will continue to run the 3234 scalar remainder. 3235 3236 [ ] <-- loop iteration number check. 3237 / | 3238 / v 3239 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3240 | / | 3241 | / v 3242 || [ ] <-- vector pre header. 3243 |/ | 3244 | v 3245 | [ ] \ 3246 | [ ]_| <-- vector loop (created during VPlan execution). 
3247 | | 3248 | v 3249 \ -[ ] <--- middle-block. 3250 \/ | 3251 /\ v 3252 | ->[ ] <--- new preheader. 3253 | | 3254 (opt) v <-- edge from middle to exit iff epilogue is not required. 3255 | [ ] \ 3256 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3257 \ | 3258 \ v 3259 >[ ] <-- exit block(s). 3260 ... 3261 */ 3262 3263 // Get the metadata of the original loop before it gets modified. 3264 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3265 3266 // Workaround! Compute the trip count of the original loop and cache it 3267 // before we start modifying the CFG. This code has a systemic problem 3268 // wherein it tries to run analysis over partially constructed IR; this is 3269 // wrong, and not simply for SCEV. The trip count of the original loop 3270 // simply happens to be prone to hitting this in practice. In theory, we 3271 // can hit the same issue for any SCEV, or ValueTracking query done during 3272 // mutation. See PR49900. 3273 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3274 3275 // Create an empty vector loop, and prepare basic blocks for the runtime 3276 // checks. 3277 createVectorLoopSkeleton(""); 3278 3279 // Now, compare the new count to zero. If it is zero skip the vector loop and 3280 // jump to the scalar loop. This check also covers the case where the 3281 // backedge-taken count is uint##_max: adding one to it will overflow leading 3282 // to an incorrect trip count of zero. In this (rare) case we will also jump 3283 // to the scalar loop. 3284 emitMinimumIterationCountCheck(LoopScalarPreHeader); 3285 3286 // Generate the code to check any assumptions that we've made for SCEV 3287 // expressions. 3288 emitSCEVChecks(LoopScalarPreHeader); 3289 3290 // Generate the code that checks in runtime if arrays overlap. We put the 3291 // checks into a separate block to make the more common case of few elements 3292 // faster. 3293 emitMemRuntimeChecks(LoopScalarPreHeader); 3294 3295 // Emit phis for the new starting index of the scalar loop. 3296 createInductionResumeValues(); 3297 3298 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3299 } 3300 3301 // Fix up external users of the induction variable. At this point, we are 3302 // in LCSSA form, with all external PHIs that use the IV having one input value, 3303 // coming from the remainder loop. We need those PHIs to also have a correct 3304 // value for the IV when arriving directly from the middle block. 3305 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3306 const InductionDescriptor &II, 3307 Value *CountRoundDown, Value *EndValue, 3308 BasicBlock *MiddleBlock, 3309 BasicBlock *VectorHeader) { 3310 // There are two kinds of external IV usages - those that use the value 3311 // computed in the last iteration (the PHI) and those that use the penultimate 3312 // value (the value that feeds into the phi from the loop latch). 3313 // We allow both, but they, obviously, have different values. 3314 3315 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3316 3317 DenseMap<Value *, Value *> MissingVals; 3318 3319 // An external user of the last iteration's value should see the value that 3320 // the remainder loop uses to initialize its own IV. 
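// E.g. for a canonical IV starting at 0 with a trip count of 10 and
// VF * UF = 4, the vector loop covers 8 iterations, so such a user must see
// the value 8 when control arrives from the middle block (illustrative
// numbers).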
3321 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3322 for (User *U : PostInc->users()) { 3323 Instruction *UI = cast<Instruction>(U); 3324 if (!OrigLoop->contains(UI)) { 3325 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3326 MissingVals[UI] = EndValue; 3327 } 3328 } 3329 3330 // An external user of the penultimate value need to see EndValue - Step. 3331 // The simplest way to get this is to recompute it from the constituent SCEVs, 3332 // that is Start + (Step * (CRD - 1)). 3333 for (User *U : OrigPhi->users()) { 3334 auto *UI = cast<Instruction>(U); 3335 if (!OrigLoop->contains(UI)) { 3336 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3337 3338 IRBuilder<> B(MiddleBlock->getTerminator()); 3339 3340 // Fast-math-flags propagate from the original induction instruction. 3341 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3342 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3343 3344 Value *CountMinusOne = B.CreateSub( 3345 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3346 Value *CMO = 3347 !II.getStep()->getType()->isIntegerTy() 3348 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3349 II.getStep()->getType()) 3350 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3351 CMO->setName("cast.cmo"); 3352 3353 Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(), 3354 VectorHeader->getTerminator()); 3355 Value *Escape = 3356 emitTransformedIndex(B, CMO, II.getStartValue(), Step, II); 3357 Escape->setName("ind.escape"); 3358 MissingVals[UI] = Escape; 3359 } 3360 } 3361 3362 for (auto &I : MissingVals) { 3363 PHINode *PHI = cast<PHINode>(I.first); 3364 // One corner case we have to handle is two IVs "chasing" each-other, 3365 // that is %IV2 = phi [...], [ %IV1, %latch ] 3366 // In this case, if IV1 has an external use, we need to avoid adding both 3367 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3368 // don't already have an incoming value for the middle block. 3369 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3370 PHI->addIncoming(I.second, MiddleBlock); 3371 } 3372 } 3373 3374 namespace { 3375 3376 struct CSEDenseMapInfo { 3377 static bool canHandle(const Instruction *I) { 3378 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3379 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3380 } 3381 3382 static inline Instruction *getEmptyKey() { 3383 return DenseMapInfo<Instruction *>::getEmptyKey(); 3384 } 3385 3386 static inline Instruction *getTombstoneKey() { 3387 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3388 } 3389 3390 static unsigned getHashValue(const Instruction *I) { 3391 assert(canHandle(I) && "Unknown instruction!"); 3392 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3393 I->value_op_end())); 3394 } 3395 3396 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3397 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3398 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3399 return LHS == RHS; 3400 return LHS->isIdenticalTo(RHS); 3401 } 3402 }; 3403 3404 } // end anonymous namespace 3405 3406 ///Perform cse of induction variable instructions. 3407 static void cse(BasicBlock *BB) { 3408 // Perform simple cse. 
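// Illustrative: two identical 'extractelement <4 x i32> %v, i64 0'
// instructions in this block hash to the same key, so the later one is
// replaced by the earlier one and erased.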
3409 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3410 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3411 if (!CSEDenseMapInfo::canHandle(&In)) 3412 continue; 3413 3414 // Check if we can replace this instruction with any of the 3415 // visited instructions. 3416 if (Instruction *V = CSEMap.lookup(&In)) { 3417 In.replaceAllUsesWith(V); 3418 In.eraseFromParent(); 3419 continue; 3420 } 3421 3422 CSEMap[&In] = &In; 3423 } 3424 } 3425 3426 InstructionCost 3427 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3428 bool &NeedToScalarize) const { 3429 Function *F = CI->getCalledFunction(); 3430 Type *ScalarRetTy = CI->getType(); 3431 SmallVector<Type *, 4> Tys, ScalarTys; 3432 for (auto &ArgOp : CI->args()) 3433 ScalarTys.push_back(ArgOp->getType()); 3434 3435 // Estimate cost of scalarized vector call. The source operands are assumed 3436 // to be vectors, so we need to extract individual elements from there, 3437 // execute VF scalar calls, and then gather the result into the vector return 3438 // value. 3439 InstructionCost ScalarCallCost = 3440 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3441 if (VF.isScalar()) 3442 return ScalarCallCost; 3443 3444 // Compute corresponding vector type for return value and arguments. 3445 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3446 for (Type *ScalarTy : ScalarTys) 3447 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3448 3449 // Compute costs of unpacking argument values for the scalar calls and 3450 // packing the return values to a vector. 3451 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3452 3453 InstructionCost Cost = 3454 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3455 3456 // If we can't emit a vector call for this function, then the currently found 3457 // cost is the cost we need to return. 3458 NeedToScalarize = true; 3459 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3460 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3461 3462 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3463 return Cost; 3464 3465 // If the corresponding vector cost is cheaper, return its cost. 
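// Illustrative numbers: with VF = 4, a scalar call cost of 10 and a
// scalarization overhead of 8, the scalarized estimate is 4 * 10 + 8 = 48; a
// vector library call costing, say, 20 is cheaper, so NeedToScalarize is
// cleared and 20 is returned.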
3466 InstructionCost VectorCallCost = 3467 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3468 if (VectorCallCost < Cost) { 3469 NeedToScalarize = false; 3470 Cost = VectorCallCost; 3471 } 3472 return Cost; 3473 } 3474 3475 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3476 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3477 return Elt; 3478 return VectorType::get(Elt, VF); 3479 } 3480 3481 InstructionCost 3482 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3483 ElementCount VF) const { 3484 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3485 assert(ID && "Expected intrinsic call!"); 3486 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3487 FastMathFlags FMF; 3488 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3489 FMF = FPMO->getFastMathFlags(); 3490 3491 SmallVector<const Value *> Arguments(CI->args()); 3492 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3493 SmallVector<Type *> ParamTys; 3494 std::transform(FTy->param_begin(), FTy->param_end(), 3495 std::back_inserter(ParamTys), 3496 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3497 3498 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3499 dyn_cast<IntrinsicInst>(CI)); 3500 return TTI.getIntrinsicInstrCost(CostAttrs, 3501 TargetTransformInfo::TCK_RecipThroughput); 3502 } 3503 3504 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3505 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3506 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3507 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3508 } 3509 3510 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3511 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3512 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3513 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3514 } 3515 3516 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3517 // For every instruction `I` in MinBWs, truncate the operands, create a 3518 // truncated version of `I` and reextend its result. InstCombine runs 3519 // later and will remove any ext/trunc pairs. 3520 SmallPtrSet<Value *, 4> Erased; 3521 for (const auto &KV : Cost->getMinimalBitwidths()) { 3522 // If the value wasn't vectorized, we must maintain the original scalar 3523 // type. The absence of the value from State indicates that it 3524 // wasn't vectorized. 3525 // FIXME: Should not rely on getVPValue at this point. 3526 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3527 if (!State.hasAnyVectorValue(Def)) 3528 continue; 3529 for (unsigned Part = 0; Part < UF; ++Part) { 3530 Value *I = State.get(Def, Part); 3531 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3532 continue; 3533 Type *OriginalTy = I->getType(); 3534 Type *ScalarTruncatedTy = 3535 IntegerType::get(OriginalTy->getContext(), KV.second); 3536 auto *TruncatedTy = VectorType::get( 3537 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3538 if (TruncatedTy == OriginalTy) 3539 continue; 3540 3541 IRBuilder<> B(cast<Instruction>(I)); 3542 auto ShrinkOperand = [&](Value *V) -> Value * { 3543 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3544 if (ZI->getSrcTy() == TruncatedTy) 3545 return ZI->getOperand(0); 3546 return B.CreateZExtOrTrunc(V, TruncatedTy); 3547 }; 3548 3549 // The actual instruction modification depends on the instruction type, 3550 // unfortunately. 
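// Illustrative, for a 32-bit add whose demanded bits fit in 8 bits (VF = 4):
//   %a = add <4 x i32> %x, %y
// becomes
//   %x.tr = trunc <4 x i32> %x to <4 x i8>   ; via ShrinkOperand
//   %y.tr = trunc <4 x i32> %y to <4 x i8>
//   %a.tr = add <4 x i8> %x.tr, %y.tr
//   %a    = zext <4 x i8> %a.tr to <4 x i32> ; re-extended below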
3551 Value *NewI = nullptr; 3552 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3553 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3554 ShrinkOperand(BO->getOperand(1))); 3555 3556 // Any wrapping introduced by shrinking this operation shouldn't be 3557 // considered undefined behavior. So, we can't unconditionally copy 3558 // arithmetic wrapping flags to NewI. 3559 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3560 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3561 NewI = 3562 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3563 ShrinkOperand(CI->getOperand(1))); 3564 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3565 NewI = B.CreateSelect(SI->getCondition(), 3566 ShrinkOperand(SI->getTrueValue()), 3567 ShrinkOperand(SI->getFalseValue())); 3568 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3569 switch (CI->getOpcode()) { 3570 default: 3571 llvm_unreachable("Unhandled cast!"); 3572 case Instruction::Trunc: 3573 NewI = ShrinkOperand(CI->getOperand(0)); 3574 break; 3575 case Instruction::SExt: 3576 NewI = B.CreateSExtOrTrunc( 3577 CI->getOperand(0), 3578 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3579 break; 3580 case Instruction::ZExt: 3581 NewI = B.CreateZExtOrTrunc( 3582 CI->getOperand(0), 3583 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3584 break; 3585 } 3586 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3587 auto Elements0 = 3588 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3589 auto *O0 = B.CreateZExtOrTrunc( 3590 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3591 auto Elements1 = 3592 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3593 auto *O1 = B.CreateZExtOrTrunc( 3594 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3595 3596 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3597 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3598 // Don't do anything with the operands, just extend the result. 3599 continue; 3600 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3601 auto Elements = 3602 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3603 auto *O0 = B.CreateZExtOrTrunc( 3604 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3605 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3606 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3607 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3608 auto Elements = 3609 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3610 auto *O0 = B.CreateZExtOrTrunc( 3611 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3612 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3613 } else { 3614 // If we don't know what to do, be conservative and don't do anything. 3615 continue; 3616 } 3617 3618 // Lastly, extend the result. 3619 NewI->takeName(cast<Instruction>(I)); 3620 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3621 I->replaceAllUsesWith(Res); 3622 cast<Instruction>(I)->eraseFromParent(); 3623 Erased.insert(I); 3624 State.reset(Def, Res, Part); 3625 } 3626 } 3627 3628 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3629 for (const auto &KV : Cost->getMinimalBitwidths()) { 3630 // If the value wasn't vectorized, we must maintain the original scalar 3631 // type. The absence of the value from State indicates that it 3632 // wasn't vectorized. 3633 // FIXME: Should not rely on getVPValue at this point. 
3634 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3635 if (!State.hasAnyVectorValue(Def)) 3636 continue; 3637 for (unsigned Part = 0; Part < UF; ++Part) { 3638 Value *I = State.get(Def, Part); 3639 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3640 if (Inst && Inst->use_empty()) { 3641 Value *NewI = Inst->getOperand(0); 3642 Inst->eraseFromParent(); 3643 State.reset(Def, NewI, Part); 3644 } 3645 } 3646 } 3647 } 3648 3649 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3650 // Insert truncates and extends for any truncated instructions as hints to 3651 // InstCombine. 3652 if (VF.isVector()) 3653 truncateToMinimalBitwidths(State); 3654 3655 // Fix widened non-induction PHIs by setting up the PHI operands. 3656 if (OrigPHIsToFix.size()) { 3657 assert(EnableVPlanNativePath && 3658 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3659 fixNonInductionPHIs(State); 3660 } 3661 3662 // At this point every instruction in the original loop is widened to a 3663 // vector form. Now we need to fix the recurrences in the loop. These PHI 3664 // nodes are currently empty because we did not want to introduce cycles. 3665 // This is the second stage of vectorizing recurrences. 3666 fixCrossIterationPHIs(State); 3667 3668 // Forget the original basic block. 3669 PSE.getSE()->forgetLoop(OrigLoop); 3670 3671 Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB); 3672 // If we inserted an edge from the middle block to the unique exit block, 3673 // update uses outside the loop (phis) to account for the newly inserted 3674 // edge. 3675 if (!Cost->requiresScalarEpilogue(VF)) { 3676 // Fix-up external users of the induction variables. 3677 for (auto &Entry : Legal->getInductionVars()) 3678 fixupIVUsers(Entry.first, Entry.second, 3679 getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()), 3680 IVEndValues[Entry.first], LoopMiddleBlock, 3681 VectorLoop->getHeader()); 3682 3683 fixLCSSAPHIs(State); 3684 } 3685 3686 for (Instruction *PI : PredicatedInstructions) 3687 sinkScalarOperands(&*PI); 3688 3689 // Remove redundant induction instructions. 3690 cse(VectorLoop->getHeader()); 3691 3692 // Set/update profile weights for the vector and remainder loops as original 3693 // loop iterations are now distributed among them. Note that original loop 3694 // represented by LoopScalarBody becomes remainder loop after vectorization. 3695 // 3696 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 3697 // end up getting slightly roughened result but that should be OK since 3698 // profile is not inherently precise anyway. Note also possible bypass of 3699 // vector code caused by legality checks is ignored, assigning all the weight 3700 // to the vector loop, optimistically. 3701 // 3702 // For scalable vectorization we can't know at compile time how many iterations 3703 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3704 // vscale of '1'. 3705 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop, 3706 LI->getLoopFor(LoopScalarBody), 3707 VF.getKnownMinValue() * UF); 3708 } 3709 3710 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3711 // In order to support recurrences we need to be able to vectorize Phi nodes. 3712 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3713 // stage #2: We now need to fix the recurrences by adding incoming edges to 3714 // the currently empty PHI nodes. 
At this point every instruction in the 3715 // original loop is widened to a vector form so we can use them to construct 3716 // the incoming edges. 3717 VPBasicBlock *Header = 3718 State.Plan->getVectorLoopRegion()->getEntryBasicBlock(); 3719 for (VPRecipeBase &R : Header->phis()) { 3720 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3721 fixReduction(ReductionPhi, State); 3722 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3723 fixFirstOrderRecurrence(FOR, State); 3724 } 3725 } 3726 3727 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3728 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 3729 // This is the second phase of vectorizing first-order recurrences. An 3730 // overview of the transformation is described below. Suppose we have the 3731 // following loop. 3732 // 3733 // for (int i = 0; i < n; ++i) 3734 // b[i] = a[i] - a[i - 1]; 3735 // 3736 // There is a first-order recurrence on "a". For this loop, the shorthand 3737 // scalar IR looks like: 3738 // 3739 // scalar.ph: 3740 // s_init = a[-1] 3741 // br scalar.body 3742 // 3743 // scalar.body: 3744 // i = phi [0, scalar.ph], [i+1, scalar.body] 3745 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3746 // s2 = a[i] 3747 // b[i] = s2 - s1 3748 // br cond, scalar.body, ... 3749 // 3750 // In this example, s1 is a recurrence because it's value depends on the 3751 // previous iteration. In the first phase of vectorization, we created a 3752 // vector phi v1 for s1. We now complete the vectorization and produce the 3753 // shorthand vector IR shown below (for VF = 4, UF = 1). 3754 // 3755 // vector.ph: 3756 // v_init = vector(..., ..., ..., a[-1]) 3757 // br vector.body 3758 // 3759 // vector.body 3760 // i = phi [0, vector.ph], [i+4, vector.body] 3761 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3762 // v2 = a[i, i+1, i+2, i+3]; 3763 // v3 = vector(v1(3), v2(0, 1, 2)) 3764 // b[i, i+1, i+2, i+3] = v2 - v3 3765 // br cond, vector.body, middle.block 3766 // 3767 // middle.block: 3768 // x = v2(3) 3769 // br scalar.ph 3770 // 3771 // scalar.ph: 3772 // s_init = phi [x, middle.block], [a[-1], otherwise] 3773 // br scalar.body 3774 // 3775 // After execution completes the vector loop, we extract the next value of 3776 // the recurrence (x) to use as the initial value in the scalar loop. 3777 3778 // Extract the last vector element in the middle block. This will be the 3779 // initial value for the recurrence when jumping to the scalar loop. 3780 VPValue *PreviousDef = PhiR->getBackedgeValue(); 3781 Value *Incoming = State.get(PreviousDef, UF - 1); 3782 auto *ExtractForScalar = Incoming; 3783 auto *IdxTy = Builder.getInt32Ty(); 3784 if (VF.isVector()) { 3785 auto *One = ConstantInt::get(IdxTy, 1); 3786 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3787 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3788 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 3789 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 3790 "vector.recur.extract"); 3791 } 3792 // Extract the second last element in the middle block if the 3793 // Phi is used outside the loop. We need to extract the phi itself 3794 // and not the last element (the phi update in the current iteration). This 3795 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3796 // when the scalar loop is not run at all. 
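// Continuing the VF = 4 example above: 'vector.recur.extract' took lane 3
// (the last element), while 'vector.recur.extract.for.phi' below takes lane 2,
// the value the recurrence phi held during the final vector iteration
// (illustrative).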
3797 Value *ExtractForPhiUsedOutsideLoop = nullptr;
3798 if (VF.isVector()) {
3799 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3800 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3801 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3802 Incoming, Idx, "vector.recur.extract.for.phi");
3803 } else if (UF > 1)
3804 // When loop is unrolled without vectorizing, initialize
3805 // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value
3806 // of `Incoming`. This is analogous to the vectorized case above: extracting
3807 // the second last element when VF > 1.
3808 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3809
3810 // Fix the initial value of the original recurrence in the scalar loop.
3811 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3812 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3813 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3814 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3815 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3816 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3817 Start->addIncoming(Incoming, BB);
3818 }
3819
3820 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3821 Phi->setName("scalar.recur");
3822
3823 // Finally, fix users of the recurrence outside the loop. The users will need
3824 // either the last value of the scalar recurrence or the last value of the
3825 // vector recurrence we extracted in the middle block. Since the loop is in
3826 // LCSSA form, we just need to find all the phi nodes for the original scalar
3827 // recurrence in the exit block, and then add an edge for the middle block.
3828 // Note that LCSSA does not imply single entry when the original scalar loop
3829 // had multiple exiting edges (as we always run the last iteration in the
3830 // scalar epilogue); in that case, there is no edge from middle to exit
3831 // and thus no phis which need updating.
3832 if (!Cost->requiresScalarEpilogue(VF))
3833 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3834 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3835 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3836 }
3837
3838 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3839 VPTransformState &State) {
3840 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
3841 // Get its reduction variable descriptor.
3842 assert(Legal->isReductionVariable(OrigPhi) &&
3843 "Unable to find the reduction variable");
3844 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3845
3846 RecurKind RK = RdxDesc.getRecurrenceKind();
3847 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3848 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3849 setDebugLocFromInst(ReductionStartValue);
3850
3851 VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3852 // This is the vector-clone of the value that leaves the loop.
3853 Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3854
3855 // Wrap flags are in general invalid after vectorization, clear them.
3856 clearReductionWrapFlags(RdxDesc, State);
3857
3858 // Before each round, move the insertion point right between
3859 // the PHIs and the values we are going to write.
3860 // This allows us to write both PHINodes and the extractelement
3861 // instructions.
3862 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3863 3864 setDebugLocFromInst(LoopExitInst); 3865 3866 Type *PhiTy = OrigPhi->getType(); 3867 BasicBlock *VectorLoopLatch = 3868 LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 3869 // If tail is folded by masking, the vector value to leave the loop should be 3870 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3871 // instead of the former. For an inloop reduction the reduction will already 3872 // be predicated, and does not need to be handled here. 3873 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 3874 for (unsigned Part = 0; Part < UF; ++Part) { 3875 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 3876 Value *Sel = nullptr; 3877 for (User *U : VecLoopExitInst->users()) { 3878 if (isa<SelectInst>(U)) { 3879 assert(!Sel && "Reduction exit feeding two selects"); 3880 Sel = U; 3881 } else 3882 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3883 } 3884 assert(Sel && "Reduction exit feeds no select"); 3885 State.reset(LoopExitInstDef, Sel, Part); 3886 3887 // If the target can create a predicated operator for the reduction at no 3888 // extra cost in the loop (for example a predicated vadd), it can be 3889 // cheaper for the select to remain in the loop than be sunk out of it, 3890 // and so use the select value for the phi instead of the old 3891 // LoopExitValue. 3892 if (PreferPredicatedReductionSelect || 3893 TTI->preferPredicatedReductionSelect( 3894 RdxDesc.getOpcode(), PhiTy, 3895 TargetTransformInfo::ReductionFlags())) { 3896 auto *VecRdxPhi = 3897 cast<PHINode>(State.get(PhiR, Part)); 3898 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3899 } 3900 } 3901 } 3902 3903 // If the vector reduction can be performed in a smaller type, we truncate 3904 // then extend the loop exit value to enable InstCombine to evaluate the 3905 // entire expression in the smaller type. 3906 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3907 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3908 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3909 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3910 VectorParts RdxParts(UF); 3911 for (unsigned Part = 0; Part < UF; ++Part) { 3912 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3913 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3914 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3915 : Builder.CreateZExt(Trunc, VecTy); 3916 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3917 if (U != Trunc) { 3918 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3919 RdxParts[Part] = Extnd; 3920 } 3921 } 3922 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3923 for (unsigned Part = 0; Part < UF; ++Part) { 3924 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3925 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3926 } 3927 } 3928 3929 // Reduce all of the unrolled parts into a single vector. 3930 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3931 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3932 3933 // The middle block terminator has already been assigned a DebugLoc here (the 3934 // OrigLoop's single latch terminator). 
We want the whole middle block to 3935 // appear to execute on this line because: (a) it is all compiler generated, 3936 // (b) these instructions are always executed after evaluating the latch 3937 // conditional branch, and (c) other passes may add new predecessors which 3938 // terminate on this line. This is the easiest way to ensure we don't 3939 // accidentally cause an extra step back into the loop while debugging. 3940 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 3941 if (PhiR->isOrdered()) 3942 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 3943 else { 3944 // Floating-point operations should have some FMF to enable the reduction. 3945 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 3946 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 3947 for (unsigned Part = 1; Part < UF; ++Part) { 3948 Value *RdxPart = State.get(LoopExitInstDef, Part); 3949 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 3950 ReducedPartRdx = Builder.CreateBinOp( 3951 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 3952 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 3953 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 3954 ReducedPartRdx, RdxPart); 3955 else 3956 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 3957 } 3958 } 3959 3960 // Create the reduction after the loop. Note that inloop reductions create the 3961 // target reduction in the loop using a Reduction recipe. 3962 if (VF.isVector() && !PhiR->isInLoop()) { 3963 ReducedPartRdx = 3964 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 3965 // If the reduction can be performed in a smaller type, we need to extend 3966 // the reduction to the wider type before we branch to the original loop. 3967 if (PhiTy != RdxDesc.getRecurrenceType()) 3968 ReducedPartRdx = RdxDesc.isSigned() 3969 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 3970 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 3971 } 3972 3973 PHINode *ResumePhi = 3974 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue()); 3975 3976 // Create a phi node that merges control-flow from the backedge-taken check 3977 // block and the middle block. 3978 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 3979 LoopScalarPreHeader->getTerminator()); 3980 3981 // If we are fixing reductions in the epilogue loop then we should already 3982 // have created a bc.merge.rdx Phi after the main vector body. Ensure that 3983 // we carry over the incoming values correctly. 3984 for (auto *Incoming : predecessors(LoopScalarPreHeader)) { 3985 if (Incoming == LoopMiddleBlock) 3986 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming); 3987 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming)) 3988 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming), 3989 Incoming); 3990 else 3991 BCBlockPhi->addIncoming(ReductionStartValue, Incoming); 3992 } 3993 3994 // Set the resume value for this reduction 3995 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi}); 3996 3997 // Now, we need to fix the users of the reduction variable 3998 // inside and outside of the scalar remainder loop. 3999 4000 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4001 // in the exit blocks. See comment on analogous loop in 4002 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
4003 if (!Cost->requiresScalarEpilogue(VF)) 4004 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4005 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4006 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4007 4008 // Fix the scalar loop reduction variable with the incoming reduction sum 4009 // from the vector body and from the backedge value. 4010 int IncomingEdgeBlockIdx = 4011 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4012 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4013 // Pick the other block. 4014 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4015 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4016 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4017 } 4018 4019 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4020 VPTransformState &State) { 4021 RecurKind RK = RdxDesc.getRecurrenceKind(); 4022 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4023 return; 4024 4025 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4026 assert(LoopExitInstr && "null loop exit instruction"); 4027 SmallVector<Instruction *, 8> Worklist; 4028 SmallPtrSet<Instruction *, 8> Visited; 4029 Worklist.push_back(LoopExitInstr); 4030 Visited.insert(LoopExitInstr); 4031 4032 while (!Worklist.empty()) { 4033 Instruction *Cur = Worklist.pop_back_val(); 4034 if (isa<OverflowingBinaryOperator>(Cur)) 4035 for (unsigned Part = 0; Part < UF; ++Part) { 4036 // FIXME: Should not rely on getVPValue at this point. 4037 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4038 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4039 } 4040 4041 for (User *U : Cur->users()) { 4042 Instruction *UI = cast<Instruction>(U); 4043 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4044 Visited.insert(UI).second) 4045 Worklist.push_back(UI); 4046 } 4047 } 4048 } 4049 4050 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4051 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4052 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4053 // Some phis were already hand updated by the reduction and recurrence 4054 // code above, leave them alone. 4055 continue; 4056 4057 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4058 // Non-instruction incoming values will have only one value. 4059 4060 VPLane Lane = VPLane::getFirstLane(); 4061 if (isa<Instruction>(IncomingValue) && 4062 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4063 VF)) 4064 Lane = VPLane::getLastLaneForVF(VF); 4065 4066 // Can be a loop invariant incoming value or the last scalar value to be 4067 // extracted from the vectorized loop. 4068 // FIXME: Should not rely on getVPValue at this point. 4069 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4070 Value *lastIncomingValue = 4071 OrigLoop->isLoopInvariant(IncomingValue) 4072 ? IncomingValue 4073 : State.get(State.Plan->getVPValue(IncomingValue, true), 4074 VPIteration(UF - 1, Lane)); 4075 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4076 } 4077 } 4078 4079 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4080 // The basic block and loop containing the predicated instruction. 4081 auto *PredBB = PredInst->getParent(); 4082 auto *VectorLoop = LI->getLoopFor(PredBB); 4083 4084 // Initialize a worklist with the operands of the predicated instruction. 
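// Illustrative: if the predicated block holds only a masked store, a GEP
// computing its address whose sole use is that store can be sunk into the
// block; a GEP that also feeds a use outside the block stays put and may be
// reanalyzed once further sinking changes the picture.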
4085 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4086 4087 // Holds instructions that we need to analyze again. An instruction may be 4088 // reanalyzed if we don't yet know if we can sink it or not. 4089 SmallVector<Instruction *, 8> InstsToReanalyze; 4090 4091 // Returns true if a given use occurs in the predicated block. Phi nodes use 4092 // their operands in their corresponding predecessor blocks. 4093 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4094 auto *I = cast<Instruction>(U.getUser()); 4095 BasicBlock *BB = I->getParent(); 4096 if (auto *Phi = dyn_cast<PHINode>(I)) 4097 BB = Phi->getIncomingBlock( 4098 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4099 return BB == PredBB; 4100 }; 4101 4102 // Iteratively sink the scalarized operands of the predicated instruction 4103 // into the block we created for it. When an instruction is sunk, it's 4104 // operands are then added to the worklist. The algorithm ends after one pass 4105 // through the worklist doesn't sink a single instruction. 4106 bool Changed; 4107 do { 4108 // Add the instructions that need to be reanalyzed to the worklist, and 4109 // reset the changed indicator. 4110 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4111 InstsToReanalyze.clear(); 4112 Changed = false; 4113 4114 while (!Worklist.empty()) { 4115 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4116 4117 // We can't sink an instruction if it is a phi node, is not in the loop, 4118 // or may have side effects. 4119 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4120 I->mayHaveSideEffects()) 4121 continue; 4122 4123 // If the instruction is already in PredBB, check if we can sink its 4124 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4125 // sinking the scalar instruction I, hence it appears in PredBB; but it 4126 // may have failed to sink I's operands (recursively), which we try 4127 // (again) here. 4128 if (I->getParent() == PredBB) { 4129 Worklist.insert(I->op_begin(), I->op_end()); 4130 continue; 4131 } 4132 4133 // It's legal to sink the instruction if all its uses occur in the 4134 // predicated block. Otherwise, there's nothing to do yet, and we may 4135 // need to reanalyze the instruction. 4136 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4137 InstsToReanalyze.push_back(I); 4138 continue; 4139 } 4140 4141 // Move the instruction to the beginning of the predicated block, and add 4142 // it's operands to the worklist. 4143 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4144 Worklist.insert(I->op_begin(), I->op_end()); 4145 4146 // The sinking may have enabled other instructions to be sunk, so we will 4147 // need to iterate. 4148 Changed = true; 4149 } 4150 } while (Changed); 4151 } 4152 4153 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4154 for (PHINode *OrigPhi : OrigPHIsToFix) { 4155 VPWidenPHIRecipe *VPPhi = 4156 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4157 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4158 // Make sure the builder has a valid insert point. 
4159 Builder.SetInsertPoint(NewPhi); 4160 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4161 VPValue *Inc = VPPhi->getIncomingValue(i); 4162 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4163 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4164 } 4165 } 4166 } 4167 4168 bool InnerLoopVectorizer::useOrderedReductions( 4169 const RecurrenceDescriptor &RdxDesc) { 4170 return Cost->useOrderedReductions(RdxDesc); 4171 } 4172 4173 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4174 VPWidenPHIRecipe *PhiR, 4175 VPTransformState &State) { 4176 assert(EnableVPlanNativePath && 4177 "Non-native vplans are not expected to have VPWidenPHIRecipes."); 4178 // Currently we enter here in the VPlan-native path for non-induction 4179 // PHIs where all control flow is uniform. We simply widen these PHIs. 4180 // Create a vector phi with no operands - the vector phi operands will be 4181 // set at the end of vector code generation. 4182 Type *VecTy = (State.VF.isScalar()) 4183 ? PN->getType() 4184 : VectorType::get(PN->getType(), State.VF); 4185 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4186 State.set(PhiR, VecPhi, 0); 4187 OrigPHIsToFix.push_back(cast<PHINode>(PN)); 4188 } 4189 4190 /// A helper function for checking whether an integer division-related 4191 /// instruction may divide by zero (in which case it must be predicated if 4192 /// executed conditionally in the scalar code). 4193 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4194 /// Non-zero divisors that are non compile-time constants will not be 4195 /// converted into multiplication, so we will still end up scalarizing 4196 /// the division, but can do so w/o predication. 4197 static bool mayDivideByZero(Instruction &I) { 4198 assert((I.getOpcode() == Instruction::UDiv || 4199 I.getOpcode() == Instruction::SDiv || 4200 I.getOpcode() == Instruction::URem || 4201 I.getOpcode() == Instruction::SRem) && 4202 "Unexpected instruction"); 4203 Value *Divisor = I.getOperand(1); 4204 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4205 return !CInt || CInt->isZero(); 4206 } 4207 4208 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4209 VPUser &ArgOperands, 4210 VPTransformState &State) { 4211 assert(!isa<DbgInfoIntrinsic>(I) && 4212 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4213 setDebugLocFromInst(&I); 4214 4215 Module *M = I.getParent()->getParent()->getParent(); 4216 auto *CI = cast<CallInst>(&I); 4217 4218 SmallVector<Type *, 4> Tys; 4219 for (Value *ArgOperand : CI->args()) 4220 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4221 4222 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4223 4224 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4225 // version of the instruction. 4226 // Is it beneficial to perform intrinsic call compared to lib call? 4227 bool NeedToScalarize = false; 4228 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4229 InstructionCost IntrinsicCost = ID ? 
Cost->getVectorIntrinsicCost(CI, VF) : 0; 4230 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4231 assert((UseVectorIntrinsic || !NeedToScalarize) && 4232 "Instruction should be scalarized elsewhere."); 4233 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4234 "Either the intrinsic cost or vector call cost must be valid"); 4235 4236 for (unsigned Part = 0; Part < UF; ++Part) { 4237 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4238 SmallVector<Value *, 4> Args; 4239 for (auto &I : enumerate(ArgOperands.operands())) { 4240 // Some intrinsics have a scalar argument - don't replace it with a 4241 // vector. 4242 Value *Arg; 4243 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4244 Arg = State.get(I.value(), Part); 4245 else { 4246 Arg = State.get(I.value(), VPIteration(0, 0)); 4247 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4248 TysForDecl.push_back(Arg->getType()); 4249 } 4250 Args.push_back(Arg); 4251 } 4252 4253 Function *VectorF; 4254 if (UseVectorIntrinsic) { 4255 // Use vector version of the intrinsic. 4256 if (VF.isVector()) 4257 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4258 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4259 assert(VectorF && "Can't retrieve vector intrinsic."); 4260 } else { 4261 // Use vector version of the function call. 4262 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4263 #ifndef NDEBUG 4264 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4265 "Can't create vector function."); 4266 #endif 4267 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4268 } 4269 SmallVector<OperandBundleDef, 1> OpBundles; 4270 CI->getOperandBundlesAsDefs(OpBundles); 4271 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4272 4273 if (isa<FPMathOperator>(V)) 4274 V->copyFastMathFlags(CI); 4275 4276 State.set(Def, V, Part); 4277 addMetadata(V, &I); 4278 } 4279 } 4280 4281 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4282 // We should not collect Scalars more than once per VF. Right now, this 4283 // function is called from collectUniformsAndScalars(), which already does 4284 // this check. Collecting Scalars for VF=1 does not make any sense. 4285 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4286 "This function should not be visited twice for the same VF"); 4287 4288 // This avoids any chances of creating a REPLICATE recipe during planning 4289 // since that would result in generation of scalarized code during execution, 4290 // which is not supported for scalable vectors. 4291 if (VF.isScalable()) { 4292 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4293 return; 4294 } 4295 4296 SmallSetVector<Instruction *, 8> Worklist; 4297 4298 // These sets are used to seed the analysis with pointers used by memory 4299 // accesses that will remain scalar. 4300 SmallSetVector<Instruction *, 8> ScalarPtrs; 4301 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4302 auto *Latch = TheLoop->getLoopLatch(); 4303 4304 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4305 // The pointer operands of loads and stores will be scalar as long as the 4306 // memory access is not a gather or scatter operation. The value operand of a 4307 // store will remain scalar if the store is scalarized. 
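// For illustration (hypothetical loop, not taken from this pass): in a loop
// computing A[i] = B[i] + 1, both accesses are consecutive and get widened,
// so the address computations feeding them are scalar uses: only one address
// per unrolled part is needed. In A[i] = B[C[i]], the load of B[C[i]] becomes
// a gather, so its pointer operand is needed as a vector of addresses and is
// not a scalar use.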
4308 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4309 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4310 assert(WideningDecision != CM_Unknown && 4311 "Widening decision should be ready at this moment"); 4312 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4313 if (Ptr == Store->getValueOperand()) 4314 return WideningDecision == CM_Scalarize; 4315 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4316 "Ptr is neither a value or pointer operand"); 4317 return WideningDecision != CM_GatherScatter; 4318 }; 4319 4320 // A helper that returns true if the given value is a bitcast or 4321 // getelementptr instruction contained in the loop. 4322 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4323 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4324 isa<GetElementPtrInst>(V)) && 4325 !TheLoop->isLoopInvariant(V); 4326 }; 4327 4328 // A helper that evaluates a memory access's use of a pointer. If the use will 4329 // be a scalar use and the pointer is only used by memory accesses, we place 4330 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4331 // PossibleNonScalarPtrs. 4332 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4333 // We only care about bitcast and getelementptr instructions contained in 4334 // the loop. 4335 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4336 return; 4337 4338 // If the pointer has already been identified as scalar (e.g., if it was 4339 // also identified as uniform), there's nothing to do. 4340 auto *I = cast<Instruction>(Ptr); 4341 if (Worklist.count(I)) 4342 return; 4343 4344 // If the use of the pointer will be a scalar use, and all users of the 4345 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4346 // place the pointer in PossibleNonScalarPtrs. 4347 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4348 return isa<LoadInst>(U) || isa<StoreInst>(U); 4349 })) 4350 ScalarPtrs.insert(I); 4351 else 4352 PossibleNonScalarPtrs.insert(I); 4353 }; 4354 4355 // We seed the scalars analysis with three classes of instructions: (1) 4356 // instructions marked uniform-after-vectorization and (2) bitcast, 4357 // getelementptr and (pointer) phi instructions used by memory accesses 4358 // requiring a scalar use. 4359 // 4360 // (1) Add to the worklist all instructions that have been identified as 4361 // uniform-after-vectorization. 4362 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4363 4364 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4365 // memory accesses requiring a scalar use. The pointer operands of loads and 4366 // stores will be scalar as long as the memory accesses is not a gather or 4367 // scatter operation. The value operand of a store will remain scalar if the 4368 // store is scalarized. 4369 for (auto *BB : TheLoop->blocks()) 4370 for (auto &I : *BB) { 4371 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4372 evaluatePtrUse(Load, Load->getPointerOperand()); 4373 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4374 evaluatePtrUse(Store, Store->getPointerOperand()); 4375 evaluatePtrUse(Store, Store->getValueOperand()); 4376 } 4377 } 4378 for (auto *I : ScalarPtrs) 4379 if (!PossibleNonScalarPtrs.count(I)) { 4380 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4381 Worklist.insert(I); 4382 } 4383 4384 // Insert the forced scalars. 
4385 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4386 // induction variable when the PHI user is scalarized. 4387 auto ForcedScalar = ForcedScalars.find(VF); 4388 if (ForcedScalar != ForcedScalars.end()) 4389 for (auto *I : ForcedScalar->second) 4390 Worklist.insert(I); 4391 4392 // Expand the worklist by looking through any bitcasts and getelementptr 4393 // instructions we've already identified as scalar. This is similar to the 4394 // expansion step in collectLoopUniforms(); however, here we're only 4395 // expanding to include additional bitcasts and getelementptr instructions. 4396 unsigned Idx = 0; 4397 while (Idx != Worklist.size()) { 4398 Instruction *Dst = Worklist[Idx++]; 4399 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4400 continue; 4401 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4402 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4403 auto *J = cast<Instruction>(U); 4404 return !TheLoop->contains(J) || Worklist.count(J) || 4405 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4406 isScalarUse(J, Src)); 4407 })) { 4408 Worklist.insert(Src); 4409 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4410 } 4411 } 4412 4413 // An induction variable will remain scalar if all users of the induction 4414 // variable and induction variable update remain scalar. 4415 for (auto &Induction : Legal->getInductionVars()) { 4416 auto *Ind = Induction.first; 4417 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4418 4419 // If tail-folding is applied, the primary induction variable will be used 4420 // to feed a vector compare. 4421 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4422 continue; 4423 4424 // Returns true if \p Indvar is a pointer induction that is used directly by 4425 // load/store instruction \p I. 4426 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4427 Instruction *I) { 4428 return Induction.second.getKind() == 4429 InductionDescriptor::IK_PtrInduction && 4430 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4431 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4432 }; 4433 4434 // Determine if all users of the induction variable are scalar after 4435 // vectorization. 4436 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4437 auto *I = cast<Instruction>(U); 4438 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4439 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4440 }); 4441 if (!ScalarInd) 4442 continue; 4443 4444 // Determine if all users of the induction variable update instruction are 4445 // scalar after vectorization. 4446 auto ScalarIndUpdate = 4447 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4448 auto *I = cast<Instruction>(U); 4449 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4450 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4451 }); 4452 if (!ScalarIndUpdate) 4453 continue; 4454 4455 // The induction variable and its update instruction will remain scalar. 
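// For example (hypothetical loop, not from this file): if i is only used to
// form the addresses of consecutive loads and stores and by its own
// increment, both i and the increment stay scalar. If i also feeds the value
// operand of a widened store (say A[i] = i), that user demands a vector of
// lane values, so the induction is not added to the scalars list here.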
4456 Worklist.insert(Ind); 4457 Worklist.insert(IndUpdate); 4458 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4459 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4460 << "\n"); 4461 } 4462 4463 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4464 } 4465 4466 bool LoopVectorizationCostModel::isScalarWithPredication( 4467 Instruction *I, ElementCount VF) const { 4468 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4469 return false; 4470 switch(I->getOpcode()) { 4471 default: 4472 break; 4473 case Instruction::Load: 4474 case Instruction::Store: { 4475 if (!Legal->isMaskRequired(I)) 4476 return false; 4477 auto *Ptr = getLoadStorePointerOperand(I); 4478 auto *Ty = getLoadStoreType(I); 4479 Type *VTy = Ty; 4480 if (VF.isVector()) 4481 VTy = VectorType::get(Ty, VF); 4482 const Align Alignment = getLoadStoreAlignment(I); 4483 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4484 TTI.isLegalMaskedGather(VTy, Alignment)) 4485 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4486 TTI.isLegalMaskedScatter(VTy, Alignment)); 4487 } 4488 case Instruction::UDiv: 4489 case Instruction::SDiv: 4490 case Instruction::SRem: 4491 case Instruction::URem: 4492 return mayDivideByZero(*I); 4493 } 4494 return false; 4495 } 4496 4497 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4498 Instruction *I, ElementCount VF) { 4499 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4500 assert(getWideningDecision(I, VF) == CM_Unknown && 4501 "Decision should not be set yet."); 4502 auto *Group = getInterleavedAccessGroup(I); 4503 assert(Group && "Must have a group."); 4504 4505 // If the instruction's allocated size doesn't equal it's type size, it 4506 // requires padding and will be scalarized. 4507 auto &DL = I->getModule()->getDataLayout(); 4508 auto *ScalarTy = getLoadStoreType(I); 4509 if (hasIrregularType(ScalarTy, DL)) 4510 return false; 4511 4512 // If the group involves a non-integral pointer, we may not be able to 4513 // losslessly cast all values to a common type. 4514 unsigned InterleaveFactor = Group->getFactor(); 4515 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy); 4516 for (unsigned i = 0; i < InterleaveFactor; i++) { 4517 Instruction *Member = Group->getMember(i); 4518 if (!Member) 4519 continue; 4520 auto *MemberTy = getLoadStoreType(Member); 4521 bool MemberNI = DL.isNonIntegralPointerType(MemberTy); 4522 // Don't coerce non-integral pointers to integers or vice versa. 4523 if (MemberNI != ScalarNI) { 4524 // TODO: Consider adding special nullptr value case here 4525 return false; 4526 } else if (MemberNI && ScalarNI && 4527 ScalarTy->getPointerAddressSpace() != 4528 MemberTy->getPointerAddressSpace()) { 4529 return false; 4530 } 4531 } 4532 4533 // Check if masking is required. 4534 // A Group may need masking for one of two reasons: it resides in a block that 4535 // needs predication, or it was decided to use masking to deal with gaps 4536 // (either a gap at the end of a load-access that may result in a speculative 4537 // load, or any gaps in a store-access). 
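// For illustration (hypothetical access pattern, not from this file): loads
// of A[3*i] and A[3*i+1] form an interleave group with factor 3 and a gap at
// member 2. The widened group reads three elements per scalar iteration, so
// the last group may touch memory past the final element the scalar loop
// would read; that is only safe with a scalar epilogue or a masked load. A
// store group with a missing member needs masking so that the gap lanes are
// not written.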
4538 bool PredicatedAccessRequiresMasking = 4539 blockNeedsPredicationForAnyReason(I->getParent()) && 4540 Legal->isMaskRequired(I); 4541 bool LoadAccessWithGapsRequiresEpilogMasking = 4542 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4543 !isScalarEpilogueAllowed(); 4544 bool StoreAccessWithGapsRequiresMasking = 4545 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4546 if (!PredicatedAccessRequiresMasking && 4547 !LoadAccessWithGapsRequiresEpilogMasking && 4548 !StoreAccessWithGapsRequiresMasking) 4549 return true; 4550 4551 // If masked interleaving is required, we expect that the user/target had 4552 // enabled it, because otherwise it either wouldn't have been created or 4553 // it should have been invalidated by the CostModel. 4554 assert(useMaskedInterleavedAccesses(TTI) && 4555 "Masked interleave-groups for predicated accesses are not enabled."); 4556 4557 if (Group->isReverse()) 4558 return false; 4559 4560 auto *Ty = getLoadStoreType(I); 4561 const Align Alignment = getLoadStoreAlignment(I); 4562 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4563 : TTI.isLegalMaskedStore(Ty, Alignment); 4564 } 4565 4566 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4567 Instruction *I, ElementCount VF) { 4568 // Get and ensure we have a valid memory instruction. 4569 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4570 4571 auto *Ptr = getLoadStorePointerOperand(I); 4572 auto *ScalarTy = getLoadStoreType(I); 4573 4574 // In order to be widened, the pointer should be consecutive, first of all. 4575 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4576 return false; 4577 4578 // If the instruction is a store located in a predicated block, it will be 4579 // scalarized. 4580 if (isScalarWithPredication(I, VF)) 4581 return false; 4582 4583 // If the instruction's allocated size doesn't equal it's type size, it 4584 // requires padding and will be scalarized. 4585 auto &DL = I->getModule()->getDataLayout(); 4586 if (hasIrregularType(ScalarTy, DL)) 4587 return false; 4588 4589 return true; 4590 } 4591 4592 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4593 // We should not collect Uniforms more than once per VF. Right now, 4594 // this function is called from collectUniformsAndScalars(), which 4595 // already does this check. Collecting Uniforms for VF=1 does not make any 4596 // sense. 4597 4598 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4599 "This function should not be visited twice for the same VF"); 4600 4601 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4602 // not analyze again. Uniforms.count(VF) will return 1. 4603 Uniforms[VF].clear(); 4604 4605 // We now know that the loop is vectorizable! 4606 // Collect instructions inside the loop that will remain uniform after 4607 // vectorization. 4608 4609 // Global values, params and instructions outside of current loop are out of 4610 // scope. 4611 auto isOutOfScope = [&](Value *V) -> bool { 4612 Instruction *I = dyn_cast<Instruction>(V); 4613 return (!I || !TheLoop->contains(I)); 4614 }; 4615 4616 // Worklist containing uniform instructions demanding lane 0. 4617 SetVector<Instruction *> Worklist; 4618 BasicBlock *Latch = TheLoop->getLoopLatch(); 4619 4620 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4621 // that are scalar with predication must not be considered uniform after 4622 // vectorization, because that would create an erroneous replicating region 4623 // where only a single instance out of VF should be formed. 4624 // TODO: optimize such seldom cases if found important, see PR40816. 4625 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4626 if (isOutOfScope(I)) { 4627 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4628 << *I << "\n"); 4629 return; 4630 } 4631 if (isScalarWithPredication(I, VF)) { 4632 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4633 << *I << "\n"); 4634 return; 4635 } 4636 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4637 Worklist.insert(I); 4638 }; 4639 4640 // Start with the conditional branch. If the branch condition is an 4641 // instruction contained in the loop that is only used by the branch, it is 4642 // uniform. 4643 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4644 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4645 addToWorklistIfAllowed(Cmp); 4646 4647 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4648 InstWidening WideningDecision = getWideningDecision(I, VF); 4649 assert(WideningDecision != CM_Unknown && 4650 "Widening decision should be ready at this moment"); 4651 4652 // A uniform memory op is itself uniform. We exclude uniform stores 4653 // here as they demand the last lane, not the first one. 4654 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4655 assert(WideningDecision == CM_Scalarize); 4656 return true; 4657 } 4658 4659 return (WideningDecision == CM_Widen || 4660 WideningDecision == CM_Widen_Reverse || 4661 WideningDecision == CM_Interleave); 4662 }; 4663 4664 4665 // Returns true if Ptr is the pointer operand of a memory access instruction 4666 // I, and I is known to not require scalarization. 4667 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4668 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4669 }; 4670 4671 // Holds a list of values which are known to have at least one uniform use. 4672 // Note that there may be other uses which aren't uniform. A "uniform use" 4673 // here is something which only demands lane 0 of the unrolled iterations; 4674 // it does not imply that all lanes produce the same value (e.g. this is not 4675 // the usual meaning of uniform) 4676 SetVector<Value *> HasUniformUse; 4677 4678 // Scan the loop for instructions which are either a) known to have only 4679 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4680 for (auto *BB : TheLoop->blocks()) 4681 for (auto &I : *BB) { 4682 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4683 switch (II->getIntrinsicID()) { 4684 case Intrinsic::sideeffect: 4685 case Intrinsic::experimental_noalias_scope_decl: 4686 case Intrinsic::assume: 4687 case Intrinsic::lifetime_start: 4688 case Intrinsic::lifetime_end: 4689 if (TheLoop->hasLoopInvariantOperands(&I)) 4690 addToWorklistIfAllowed(&I); 4691 break; 4692 default: 4693 break; 4694 } 4695 } 4696 4697 // ExtractValue instructions must be uniform, because the operands are 4698 // known to be loop-invariant. 
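// For example (hypothetical code, not from this pass): if a struct-returning
// call that sits outside the loop produces an aggregate %agg, an extractvalue
// of %agg inside the loop yields the same value on every iteration, so only
// lane 0 of it is ever demanded.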
4699 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4700 assert(isOutOfScope(EVI->getAggregateOperand()) && 4701 "Expected aggregate value to be loop invariant"); 4702 addToWorklistIfAllowed(EVI); 4703 continue; 4704 } 4705 4706 // If there's no pointer operand, there's nothing to do. 4707 auto *Ptr = getLoadStorePointerOperand(&I); 4708 if (!Ptr) 4709 continue; 4710 4711 // A uniform memory op is itself uniform. We exclude uniform stores 4712 // here as they demand the last lane, not the first one. 4713 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4714 addToWorklistIfAllowed(&I); 4715 4716 if (isUniformDecision(&I, VF)) { 4717 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4718 HasUniformUse.insert(Ptr); 4719 } 4720 } 4721 4722 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4723 // demanding) users. Since loops are assumed to be in LCSSA form, this 4724 // disallows uses outside the loop as well. 4725 for (auto *V : HasUniformUse) { 4726 if (isOutOfScope(V)) 4727 continue; 4728 auto *I = cast<Instruction>(V); 4729 auto UsersAreMemAccesses = 4730 llvm::all_of(I->users(), [&](User *U) -> bool { 4731 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4732 }); 4733 if (UsersAreMemAccesses) 4734 addToWorklistIfAllowed(I); 4735 } 4736 4737 // Expand Worklist in topological order: whenever a new instruction 4738 // is added , its users should be already inside Worklist. It ensures 4739 // a uniform instruction will only be used by uniform instructions. 4740 unsigned idx = 0; 4741 while (idx != Worklist.size()) { 4742 Instruction *I = Worklist[idx++]; 4743 4744 for (auto OV : I->operand_values()) { 4745 // isOutOfScope operands cannot be uniform instructions. 4746 if (isOutOfScope(OV)) 4747 continue; 4748 // First order recurrence Phi's should typically be considered 4749 // non-uniform. 4750 auto *OP = dyn_cast<PHINode>(OV); 4751 if (OP && Legal->isFirstOrderRecurrence(OP)) 4752 continue; 4753 // If all the users of the operand are uniform, then add the 4754 // operand into the uniform worklist. 4755 auto *OI = cast<Instruction>(OV); 4756 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4757 auto *J = cast<Instruction>(U); 4758 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4759 })) 4760 addToWorklistIfAllowed(OI); 4761 } 4762 } 4763 4764 // For an instruction to be added into Worklist above, all its users inside 4765 // the loop should also be in Worklist. However, this condition cannot be 4766 // true for phi nodes that form a cyclic dependence. We must process phi 4767 // nodes separately. An induction variable will remain uniform if all users 4768 // of the induction variable and induction variable update remain uniform. 4769 // The code below handles both pointer and non-pointer induction variables. 4770 for (auto &Induction : Legal->getInductionVars()) { 4771 auto *Ind = Induction.first; 4772 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4773 4774 // Determine if all users of the induction variable are uniform after 4775 // vectorization. 4776 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4777 auto *I = cast<Instruction>(U); 4778 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4779 isVectorizedMemAccessUse(I, Ind); 4780 }); 4781 if (!UniformInd) 4782 continue; 4783 4784 // Determine if all users of the induction variable update instruction are 4785 // uniform after vectorization. 
4786 auto UniformIndUpdate = 4787 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4788 auto *I = cast<Instruction>(U); 4789 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4790 isVectorizedMemAccessUse(I, IndUpdate); 4791 }); 4792 if (!UniformIndUpdate) 4793 continue; 4794 4795 // The induction variable and its update instruction will remain uniform. 4796 addToWorklistIfAllowed(Ind); 4797 addToWorklistIfAllowed(IndUpdate); 4798 } 4799 4800 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4801 } 4802 4803 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4804 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4805 4806 if (Legal->getRuntimePointerChecking()->Need) { 4807 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4808 "runtime pointer checks needed. Enable vectorization of this " 4809 "loop with '#pragma clang loop vectorize(enable)' when " 4810 "compiling with -Os/-Oz", 4811 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4812 return true; 4813 } 4814 4815 if (!PSE.getPredicate().isAlwaysTrue()) { 4816 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4817 "runtime SCEV checks needed. Enable vectorization of this " 4818 "loop with '#pragma clang loop vectorize(enable)' when " 4819 "compiling with -Os/-Oz", 4820 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4821 return true; 4822 } 4823 4824 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4825 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4826 reportVectorizationFailure("Runtime stride check for small trip count", 4827 "runtime stride == 1 checks needed. Enable vectorization of " 4828 "this loop without such check by compiling with -Os/-Oz", 4829 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4830 return true; 4831 } 4832 4833 return false; 4834 } 4835 4836 ElementCount 4837 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4838 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4839 return ElementCount::getScalable(0); 4840 4841 if (Hints->isScalableVectorizationDisabled()) { 4842 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4843 "ScalableVectorizationDisabled", ORE, TheLoop); 4844 return ElementCount::getScalable(0); 4845 } 4846 4847 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4848 4849 auto MaxScalableVF = ElementCount::getScalable( 4850 std::numeric_limits<ElementCount::ScalarTy>::max()); 4851 4852 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4853 // FIXME: While for scalable vectors this is currently sufficient, this should 4854 // be replaced by a more detailed mechanism that filters out specific VFs, 4855 // instead of invalidating vectorization for a whole set of VFs based on the 4856 // MaxVF. 4857 4858 // Disable scalable vectorization if the loop contains unsupported reductions. 4859 if (!canVectorizeReductions(MaxScalableVF)) { 4860 reportVectorizationInfo( 4861 "Scalable vectorization not supported for the reduction " 4862 "operations found in this loop.", 4863 "ScalableVFUnfeasible", ORE, TheLoop); 4864 return ElementCount::getScalable(0); 4865 } 4866 4867 // Disable scalable vectorization if the loop contains any instructions 4868 // with element types not supported for scalable vectors. 
4869 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4870 return !Ty->isVoidTy() && 4871 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4872 })) { 4873 reportVectorizationInfo("Scalable vectorization is not supported " 4874 "for all element types found in this loop.", 4875 "ScalableVFUnfeasible", ORE, TheLoop); 4876 return ElementCount::getScalable(0); 4877 } 4878 4879 if (Legal->isSafeForAnyVectorWidth()) 4880 return MaxScalableVF; 4881 4882 // Limit MaxScalableVF by the maximum safe dependence distance. 4883 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4884 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4885 MaxVScale = 4886 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4887 MaxScalableVF = ElementCount::getScalable( 4888 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 4889 if (!MaxScalableVF) 4890 reportVectorizationInfo( 4891 "Max legal vector width too small, scalable vectorization " 4892 "unfeasible.", 4893 "ScalableVFUnfeasible", ORE, TheLoop); 4894 4895 return MaxScalableVF; 4896 } 4897 4898 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4899 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4900 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4901 unsigned SmallestType, WidestType; 4902 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4903 4904 // Get the maximum safe dependence distance in bits computed by LAA. 4905 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4906 // the memory accesses that is most restrictive (involved in the smallest 4907 // dependence distance). 4908 unsigned MaxSafeElements = 4909 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4910 4911 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4912 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4913 4914 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4915 << ".\n"); 4916 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4917 << ".\n"); 4918 4919 // First analyze the UserVF, fall back if the UserVF should be ignored. 4920 if (UserVF) { 4921 auto MaxSafeUserVF = 4922 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4923 4924 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4925 // If `VF=vscale x N` is safe, then so is `VF=N` 4926 if (UserVF.isScalable()) 4927 return FixedScalableVFPair( 4928 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4929 else 4930 return UserVF; 4931 } 4932 4933 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4934 4935 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4936 // is better to ignore the hint and let the compiler choose a suitable VF. 
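// For illustration (made-up numbers): with MaxSafeFixedVF = 16, a fixed user
// hint of VF=32 is clamped to 16, whereas a scalable hint of vscale x 32 is
// ignored outright; in both cases a remark explains the decision, and in the
// scalable case VF selection continues as if no hint had been given.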
4937 if (!UserVF.isScalable()) { 4938 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4939 << " is unsafe, clamping to max safe VF=" 4940 << MaxSafeFixedVF << ".\n"); 4941 ORE->emit([&]() { 4942 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4943 TheLoop->getStartLoc(), 4944 TheLoop->getHeader()) 4945 << "User-specified vectorization factor " 4946 << ore::NV("UserVectorizationFactor", UserVF) 4947 << " is unsafe, clamping to maximum safe vectorization factor " 4948 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 4949 }); 4950 return MaxSafeFixedVF; 4951 } 4952 4953 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 4954 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4955 << " is ignored because scalable vectors are not " 4956 "available.\n"); 4957 ORE->emit([&]() { 4958 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4959 TheLoop->getStartLoc(), 4960 TheLoop->getHeader()) 4961 << "User-specified vectorization factor " 4962 << ore::NV("UserVectorizationFactor", UserVF) 4963 << " is ignored because the target does not support scalable " 4964 "vectors. The compiler will pick a more suitable value."; 4965 }); 4966 } else { 4967 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4968 << " is unsafe. Ignoring scalable UserVF.\n"); 4969 ORE->emit([&]() { 4970 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4971 TheLoop->getStartLoc(), 4972 TheLoop->getHeader()) 4973 << "User-specified vectorization factor " 4974 << ore::NV("UserVectorizationFactor", UserVF) 4975 << " is unsafe. Ignoring the hint to let the compiler pick a " 4976 "more suitable value."; 4977 }); 4978 } 4979 } 4980 4981 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4982 << " / " << WidestType << " bits.\n"); 4983 4984 FixedScalableVFPair Result(ElementCount::getFixed(1), 4985 ElementCount::getScalable(0)); 4986 if (auto MaxVF = 4987 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4988 MaxSafeFixedVF, FoldTailByMasking)) 4989 Result.FixedVF = MaxVF; 4990 4991 if (auto MaxVF = 4992 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4993 MaxSafeScalableVF, FoldTailByMasking)) 4994 if (MaxVF.isScalable()) { 4995 Result.ScalableVF = MaxVF; 4996 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 4997 << "\n"); 4998 } 4999 5000 return Result; 5001 } 5002 5003 FixedScalableVFPair 5004 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5005 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5006 // TODO: It may by useful to do since it's still likely to be dynamically 5007 // uniform if the target can skip. 5008 reportVectorizationFailure( 5009 "Not inserting runtime ptr check for divergent target", 5010 "runtime pointer checks needed. 
Not enabled for divergent target", 5011 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5012 return FixedScalableVFPair::getNone(); 5013 } 5014 5015 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5016 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5017 if (TC == 1) { 5018 reportVectorizationFailure("Single iteration (non) loop", 5019 "loop trip count is one, irrelevant for vectorization", 5020 "SingleIterationLoop", ORE, TheLoop); 5021 return FixedScalableVFPair::getNone(); 5022 } 5023 5024 switch (ScalarEpilogueStatus) { 5025 case CM_ScalarEpilogueAllowed: 5026 return computeFeasibleMaxVF(TC, UserVF, false); 5027 case CM_ScalarEpilogueNotAllowedUsePredicate: 5028 LLVM_FALLTHROUGH; 5029 case CM_ScalarEpilogueNotNeededUsePredicate: 5030 LLVM_DEBUG( 5031 dbgs() << "LV: vector predicate hint/switch found.\n" 5032 << "LV: Not allowing scalar epilogue, creating predicated " 5033 << "vector loop.\n"); 5034 break; 5035 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5036 // fallthrough as a special case of OptForSize 5037 case CM_ScalarEpilogueNotAllowedOptSize: 5038 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5039 LLVM_DEBUG( 5040 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5041 else 5042 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5043 << "count.\n"); 5044 5045 // Bail if runtime checks are required, which are not good when optimising 5046 // for size. 5047 if (runtimeChecksRequired()) 5048 return FixedScalableVFPair::getNone(); 5049 5050 break; 5051 } 5052 5053 // The only loops we can vectorize without a scalar epilogue, are loops with 5054 // a bottom-test and a single exiting block. We'd have to handle the fact 5055 // that not every instruction executes on the last iteration. This will 5056 // require a lane mask which varies through the vector loop body. (TODO) 5057 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5058 // If there was a tail-folding hint/switch, but we can't fold the tail by 5059 // masking, fallback to a vectorization with a scalar epilogue. 5060 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5061 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5062 "scalar epilogue instead.\n"); 5063 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5064 return computeFeasibleMaxVF(TC, UserVF, false); 5065 } 5066 return FixedScalableVFPair::getNone(); 5067 } 5068 5069 // Now try the tail folding 5070 5071 // Invalidate interleave groups that require an epilogue if we can't mask 5072 // the interleave-group. 5073 if (!useMaskedInterleavedAccesses(TTI)) { 5074 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5075 "No decisions should have been taken at this point"); 5076 // Note: There is no need to invalidate any cost modeling decisions here, as 5077 // non where taken so far. 5078 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5079 } 5080 5081 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true); 5082 // Avoid tail folding if the trip count is known to be a multiple of any VF 5083 // we chose. 5084 // FIXME: The condition below pessimises the case for fixed-width vectors, 5085 // when scalable VFs are also candidates for vectorization. 
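// Worked example (made-up numbers): with a known trip count of 128,
// MaxFixedVF = 8 and a user interleave count of 2, the remainder
// 128 % (8 * 2) is 0; every smaller power-of-two VF then also divides the
// trip count evenly, no tail remains, and tail folding is skipped.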
5086 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5087 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5088 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5089 "MaxFixedVF must be a power of 2"); 5090 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5091 : MaxFixedVF.getFixedValue(); 5092 ScalarEvolution *SE = PSE.getSE(); 5093 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5094 const SCEV *ExitCount = SE->getAddExpr( 5095 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5096 const SCEV *Rem = SE->getURemExpr( 5097 SE->applyLoopGuards(ExitCount, TheLoop), 5098 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5099 if (Rem->isZero()) { 5100 // Accept MaxFixedVF if we do not have a tail. 5101 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5102 return MaxFactors; 5103 } 5104 } 5105 5106 // For scalable vectors don't use tail folding for low trip counts or 5107 // optimizing for code size. We only permit this if the user has explicitly 5108 // requested it. 5109 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5110 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5111 MaxFactors.ScalableVF.isVector()) 5112 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5113 5114 // If we don't know the precise trip count, or if the trip count that we 5115 // found modulo the vectorization factor is not zero, try to fold the tail 5116 // by masking. 5117 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5118 if (Legal->prepareToFoldTailByMasking()) { 5119 FoldTailByMasking = true; 5120 return MaxFactors; 5121 } 5122 5123 // If there was a tail-folding hint/switch, but we can't fold the tail by 5124 // masking, fallback to a vectorization with a scalar epilogue. 5125 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5126 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5127 "scalar epilogue instead.\n"); 5128 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5129 return MaxFactors; 5130 } 5131 5132 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5133 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5134 return FixedScalableVFPair::getNone(); 5135 } 5136 5137 if (TC == 0) { 5138 reportVectorizationFailure( 5139 "Unable to calculate the loop count due to complex control flow", 5140 "unable to calculate the loop count due to complex control flow", 5141 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5142 return FixedScalableVFPair::getNone(); 5143 } 5144 5145 reportVectorizationFailure( 5146 "Cannot optimize for size and vectorize at the same time.", 5147 "cannot optimize for size and vectorize at the same time. " 5148 "Enable vectorization of this loop with '#pragma clang loop " 5149 "vectorize(enable)' when compiling with -Os/-Oz", 5150 "NoTailLoopWithOptForSize", ORE, TheLoop); 5151 return FixedScalableVFPair::getNone(); 5152 } 5153 5154 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5155 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5156 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5157 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5158 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5159 ComputeScalableMaxVF ? 
TargetTransformInfo::RGK_ScalableVector 5160 : TargetTransformInfo::RGK_FixedWidthVector); 5161 5162 // Convenience function to return the minimum of two ElementCounts. 5163 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5164 assert((LHS.isScalable() == RHS.isScalable()) && 5165 "Scalable flags must match"); 5166 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5167 }; 5168 5169 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5170 // Note that both WidestRegister and WidestType may not be a powers of 2. 5171 auto MaxVectorElementCount = ElementCount::get( 5172 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5173 ComputeScalableMaxVF); 5174 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5175 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5176 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5177 5178 if (!MaxVectorElementCount) { 5179 LLVM_DEBUG(dbgs() << "LV: The target has no " 5180 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5181 << " vector registers.\n"); 5182 return ElementCount::getFixed(1); 5183 } 5184 5185 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5186 if (ConstTripCount && 5187 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5188 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5189 // If loop trip count (TC) is known at compile time there is no point in 5190 // choosing VF greater than TC (as done in the loop below). Select maximum 5191 // power of two which doesn't exceed TC. 5192 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5193 // when the TC is less than or equal to the known number of lanes. 5194 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5195 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5196 "exceeding the constant trip count: " 5197 << ClampedConstTripCount << "\n"); 5198 return ElementCount::getFixed(ClampedConstTripCount); 5199 } 5200 5201 ElementCount MaxVF = MaxVectorElementCount; 5202 if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 && 5203 TTI.shouldMaximizeVectorBandwidth())) { 5204 auto MaxVectorElementCountMaxBW = ElementCount::get( 5205 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5206 ComputeScalableMaxVF); 5207 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5208 5209 // Collect all viable vectorization factors larger than the default MaxVF 5210 // (i.e. MaxVectorElementCount). 5211 SmallVector<ElementCount, 8> VFs; 5212 for (ElementCount VS = MaxVectorElementCount * 2; 5213 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5214 VFs.push_back(VS); 5215 5216 // For each VF calculate its register usage. 5217 auto RUs = calculateRegisterUsage(VFs); 5218 5219 // Select the largest VF which doesn't require more registers than existing 5220 // ones. 
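// For example (made-up numbers): if the default MaxVF is 4 and the
// maximize-bandwidth candidates are 8 and 16, and the register-usage estimate
// for VF=16 exceeds the target's vector register count while VF=8 fits, the
// loop below settles on VF=8. Candidates are visited from the widest down, so
// the first one that fits wins.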
5221 for (int i = RUs.size() - 1; i >= 0; --i) { 5222 bool Selected = true; 5223 for (auto &pair : RUs[i].MaxLocalUsers) { 5224 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5225 if (pair.second > TargetNumRegisters) 5226 Selected = false; 5227 } 5228 if (Selected) { 5229 MaxVF = VFs[i]; 5230 break; 5231 } 5232 } 5233 if (ElementCount MinVF = 5234 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5235 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5236 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5237 << ") with target's minimum: " << MinVF << '\n'); 5238 MaxVF = MinVF; 5239 } 5240 } 5241 5242 // Invalidate any widening decisions we might have made, in case the loop 5243 // requires prediction (decided later), but we have already made some 5244 // load/store widening decisions. 5245 invalidateCostModelingDecisions(); 5246 } 5247 return MaxVF; 5248 } 5249 5250 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5251 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5252 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5253 auto Min = Attr.getVScaleRangeMin(); 5254 auto Max = Attr.getVScaleRangeMax(); 5255 if (Max && Min == Max) 5256 return Max; 5257 } 5258 5259 return TTI.getVScaleForTuning(); 5260 } 5261 5262 bool LoopVectorizationCostModel::isMoreProfitable( 5263 const VectorizationFactor &A, const VectorizationFactor &B) const { 5264 InstructionCost CostA = A.Cost; 5265 InstructionCost CostB = B.Cost; 5266 5267 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5268 5269 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5270 MaxTripCount) { 5271 // If we are folding the tail and the trip count is a known (possibly small) 5272 // constant, the trip count will be rounded up to an integer number of 5273 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5274 // which we compare directly. When not folding the tail, the total cost will 5275 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5276 // approximated with the per-lane cost below instead of using the tripcount 5277 // as here. 5278 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5279 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5280 return RTCostA < RTCostB; 5281 } 5282 5283 // Improve estimate for the vector width if it is scalable. 5284 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5285 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5286 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5287 if (A.Width.isScalable()) 5288 EstimatedWidthA *= VScale.getValue(); 5289 if (B.Width.isScalable()) 5290 EstimatedWidthB *= VScale.getValue(); 5291 } 5292 5293 // Assume vscale may be larger than 1 (or the value being tuned for), 5294 // so that scalable vectorization is slightly favorable over fixed-width 5295 // vectorization. 
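// Worked example (made-up costs): comparing A = vscale x 4 with cost 10
// against a fixed B = 8 with cost 10, and a tuning value of vscale = 2, gives
// EstimatedWidthA = 8. The scalable-vs-fixed case below uses <=, so
// 10 * 8 <= 10 * 8 holds and the tie is broken in favor of the scalable
// factor.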
5296 if (A.Width.isScalable() && !B.Width.isScalable()) 5297 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5298 5299 // To avoid the need for FP division: 5300 // (CostA / A.Width) < (CostB / B.Width) 5301 // <=> (CostA * B.Width) < (CostB * A.Width) 5302 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5303 } 5304 5305 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5306 const ElementCountSet &VFCandidates) { 5307 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5308 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5309 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5310 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5311 "Expected Scalar VF to be a candidate"); 5312 5313 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5314 VectorizationFactor ChosenFactor = ScalarCost; 5315 5316 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5317 if (ForceVectorization && VFCandidates.size() > 1) { 5318 // Ignore scalar width, because the user explicitly wants vectorization. 5319 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5320 // evaluation. 5321 ChosenFactor.Cost = InstructionCost::getMax(); 5322 } 5323 5324 SmallVector<InstructionVFPair> InvalidCosts; 5325 for (const auto &i : VFCandidates) { 5326 // The cost for scalar VF=1 is already calculated, so ignore it. 5327 if (i.isScalar()) 5328 continue; 5329 5330 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5331 VectorizationFactor Candidate(i, C.first); 5332 5333 #ifndef NDEBUG 5334 unsigned AssumedMinimumVscale = 1; 5335 if (Optional<unsigned> VScale = getVScaleForTuning()) 5336 AssumedMinimumVscale = VScale.getValue(); 5337 unsigned Width = 5338 Candidate.Width.isScalable() 5339 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5340 : Candidate.Width.getFixedValue(); 5341 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5342 << " costs: " << (Candidate.Cost / Width)); 5343 if (i.isScalable()) 5344 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5345 << AssumedMinimumVscale << ")"); 5346 LLVM_DEBUG(dbgs() << ".\n"); 5347 #endif 5348 5349 if (!C.second && !ForceVectorization) { 5350 LLVM_DEBUG( 5351 dbgs() << "LV: Not considering vector loop of width " << i 5352 << " because it will not generate any vector instructions.\n"); 5353 continue; 5354 } 5355 5356 // If profitable add it to ProfitableVF list. 5357 if (isMoreProfitable(Candidate, ScalarCost)) 5358 ProfitableVFs.push_back(Candidate); 5359 5360 if (isMoreProfitable(Candidate, ChosenFactor)) 5361 ChosenFactor = Candidate; 5362 } 5363 5364 // Emit a report of VFs with invalid costs in the loop. 5365 if (!InvalidCosts.empty()) { 5366 // Group the remarks per instruction, keeping the instruction order from 5367 // InvalidCosts. 5368 std::map<Instruction *, unsigned> Numbering; 5369 unsigned I = 0; 5370 for (auto &Pair : InvalidCosts) 5371 if (!Numbering.count(Pair.first)) 5372 Numbering[Pair.first] = I++; 5373 5374 // Sort the list, first on instruction(number) then on VF. 
5375 llvm::sort(InvalidCosts, 5376 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5377 if (Numbering[A.first] != Numbering[B.first]) 5378 return Numbering[A.first] < Numbering[B.first]; 5379 ElementCountComparator ECC; 5380 return ECC(A.second, B.second); 5381 }); 5382 5383 // For a list of ordered instruction-vf pairs: 5384 // [(load, vf1), (load, vf2), (store, vf1)] 5385 // Group the instructions together to emit separate remarks for: 5386 // load (vf1, vf2) 5387 // store (vf1) 5388 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5389 auto Subset = ArrayRef<InstructionVFPair>(); 5390 do { 5391 if (Subset.empty()) 5392 Subset = Tail.take_front(1); 5393 5394 Instruction *I = Subset.front().first; 5395 5396 // If the next instruction is different, or if there are no other pairs, 5397 // emit a remark for the collated subset. e.g. 5398 // [(load, vf1), (load, vf2))] 5399 // to emit: 5400 // remark: invalid costs for 'load' at VF=(vf, vf2) 5401 if (Subset == Tail || Tail[Subset.size()].first != I) { 5402 std::string OutString; 5403 raw_string_ostream OS(OutString); 5404 assert(!Subset.empty() && "Unexpected empty range"); 5405 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5406 for (auto &Pair : Subset) 5407 OS << (Pair.second == Subset.front().second ? "" : ", ") 5408 << Pair.second; 5409 OS << "):"; 5410 if (auto *CI = dyn_cast<CallInst>(I)) 5411 OS << " call to " << CI->getCalledFunction()->getName(); 5412 else 5413 OS << " " << I->getOpcodeName(); 5414 OS.flush(); 5415 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5416 Tail = Tail.drop_front(Subset.size()); 5417 Subset = {}; 5418 } else 5419 // Grow the subset by one element 5420 Subset = Tail.take_front(Subset.size() + 1); 5421 } while (!Tail.empty()); 5422 } 5423 5424 if (!EnableCondStoresVectorization && NumPredStores) { 5425 reportVectorizationFailure("There are conditional stores.", 5426 "store that is conditionally executed prevents vectorization", 5427 "ConditionalStore", ORE, TheLoop); 5428 ChosenFactor = ScalarCost; 5429 } 5430 5431 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5432 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5433 << "LV: Vectorization seems to be not beneficial, " 5434 << "but was forced by a user.\n"); 5435 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5436 return ChosenFactor; 5437 } 5438 5439 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5440 const Loop &L, ElementCount VF) const { 5441 // Cross iteration phis such as reductions need special handling and are 5442 // currently unsupported. 5443 if (any_of(L.getHeader()->phis(), 5444 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5445 return false; 5446 5447 // Phis with uses outside of the loop require special handling and are 5448 // currently unsupported. 5449 for (auto &Entry : Legal->getInductionVars()) { 5450 // Look for uses of the value of the induction at the last iteration. 5451 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5452 for (User *U : PostInc->users()) 5453 if (!L.contains(cast<Instruction>(U))) 5454 return false; 5455 // Look for uses of penultimate value of the induction. 5456 for (User *U : Entry.first->users()) 5457 if (!L.contains(cast<Instruction>(U))) 5458 return false; 5459 } 5460 5461 // Induction variables that are widened require special handling that is 5462 // currently not supported. 
5463   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5464         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5465                  this->isProfitableToScalarize(Entry.first, VF));
5466       }))
5467     return false;
5468 
5469   // Epilogue vectorization code has not been audited to ensure it handles
5470   // non-latch exits properly. It may be fine, but it needs to be audited and
5471   // tested.
5472   if (L.getExitingBlock() != L.getLoopLatch())
5473     return false;
5474 
5475   return true;
5476 }
5477 
5478 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5479     const ElementCount VF) const {
5480   // FIXME: We need a much better cost-model to take different parameters such
5481   // as register pressure, code size increase and cost of extra branches into
5482   // account. For now we apply a very crude heuristic and only consider loops
5483   // with vectorization factors larger than a certain value.
5484   // We also consider epilogue vectorization unprofitable for targets that don't
5485   // consider interleaving beneficial (e.g. MVE).
5486   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5487     return false;
5488   // FIXME: We should consider changing the threshold for scalable
5489   // vectors to take VScaleForTuning into account.
5490   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5491     return true;
5492   return false;
5493 }
5494 
5495 VectorizationFactor
5496 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5497     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5498   VectorizationFactor Result = VectorizationFactor::Disabled();
5499   if (!EnableEpilogueVectorization) {
5500     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5501     return Result;
5502   }
5503 
5504   if (!isScalarEpilogueAllowed()) {
5505     LLVM_DEBUG(
5506         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5507                   "allowed.\n";);
5508     return Result;
5509   }
5510 
5511   // Not really a cost consideration, but check for unsupported cases here to
5512   // simplify the logic.
5513   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5514     LLVM_DEBUG(
5515         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5516                   "not a supported candidate.\n";);
5517     return Result;
5518   }
5519 
5520   if (EpilogueVectorizationForceVF > 1) {
5521     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5522     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5523     if (LVP.hasPlanWithVF(ForcedEC))
5524       return {ForcedEC, 0};
5525     else {
5526       LLVM_DEBUG(
5527           dbgs()
5528           << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5529       return Result;
5530     }
5531   }
5532 
5533   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5534       TheLoop->getHeader()->getParent()->hasMinSize()) {
5535     LLVM_DEBUG(
5536         dbgs()
5537         << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5538     return Result;
5539   }
5540 
5541   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5542     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5543                          "this loop\n");
5544     return Result;
5545   }
5546 
5547   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5548   // the main loop handles 8 lanes per iteration. We could still benefit from
5549   // vectorizing the epilogue loop with VF=4.
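  // For illustration only (the numbers here are assumptions, not target
  // facts): if getVScaleForTuning() returns 4 for the example above, the code
  // below turns MainLoopVF = vscale x 2 into EstimatedRuntimeVF = fixed 8, and
  // the search over ProfitableVFs can then accept a fixed epilogue VF such as
  // 4 because it is known to be smaller than the estimated runtime width.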
5550 ElementCount EstimatedRuntimeVF = MainLoopVF; 5551 if (MainLoopVF.isScalable()) { 5552 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5553 if (Optional<unsigned> VScale = getVScaleForTuning()) 5554 EstimatedRuntimeVF *= VScale.getValue(); 5555 } 5556 5557 for (auto &NextVF : ProfitableVFs) 5558 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5559 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5560 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5561 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5562 LVP.hasPlanWithVF(NextVF.Width)) 5563 Result = NextVF; 5564 5565 if (Result != VectorizationFactor::Disabled()) 5566 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5567 << Result.Width << "\n";); 5568 return Result; 5569 } 5570 5571 std::pair<unsigned, unsigned> 5572 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5573 unsigned MinWidth = -1U; 5574 unsigned MaxWidth = 8; 5575 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5576 // For in-loop reductions, no element types are added to ElementTypesInLoop 5577 // if there are no loads/stores in the loop. In this case, check through the 5578 // reduction variables to determine the maximum width. 5579 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5580 // Reset MaxWidth so that we can find the smallest type used by recurrences 5581 // in the loop. 5582 MaxWidth = -1U; 5583 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5584 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5585 // When finding the min width used by the recurrence we need to account 5586 // for casts on the input operands of the recurrence. 5587 MaxWidth = std::min<unsigned>( 5588 MaxWidth, std::min<unsigned>( 5589 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5590 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5591 } 5592 } else { 5593 for (Type *T : ElementTypesInLoop) { 5594 MinWidth = std::min<unsigned>( 5595 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5596 MaxWidth = std::max<unsigned>( 5597 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5598 } 5599 } 5600 return {MinWidth, MaxWidth}; 5601 } 5602 5603 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5604 ElementTypesInLoop.clear(); 5605 // For each block. 5606 for (BasicBlock *BB : TheLoop->blocks()) { 5607 // For each instruction in the loop. 5608 for (Instruction &I : BB->instructionsWithoutDebug()) { 5609 Type *T = I.getType(); 5610 5611 // Skip ignored values. 5612 if (ValuesToIgnore.count(&I)) 5613 continue; 5614 5615 // Only examine Loads, Stores and PHINodes. 5616 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5617 continue; 5618 5619 // Examine PHI nodes that are reduction variables. Update the type to 5620 // account for the recurrence type. 5621 if (auto *PN = dyn_cast<PHINode>(&I)) { 5622 if (!Legal->isReductionVariable(PN)) 5623 continue; 5624 const RecurrenceDescriptor &RdxDesc = 5625 Legal->getReductionVars().find(PN)->second; 5626 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5627 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5628 RdxDesc.getRecurrenceType(), 5629 TargetTransformInfo::ReductionFlags())) 5630 continue; 5631 T = RdxDesc.getRecurrenceType(); 5632 } 5633 5634 // Examine the stored values. 
5635 if (auto *ST = dyn_cast<StoreInst>(&I)) 5636 T = ST->getValueOperand()->getType(); 5637 5638 assert(T->isSized() && 5639 "Expected the load/store/recurrence type to be sized"); 5640 5641 ElementTypesInLoop.insert(T); 5642 } 5643 } 5644 } 5645 5646 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5647 unsigned LoopCost) { 5648 // -- The interleave heuristics -- 5649 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5650 // There are many micro-architectural considerations that we can't predict 5651 // at this level. For example, frontend pressure (on decode or fetch) due to 5652 // code size, or the number and capabilities of the execution ports. 5653 // 5654 // We use the following heuristics to select the interleave count: 5655 // 1. If the code has reductions, then we interleave to break the cross 5656 // iteration dependency. 5657 // 2. If the loop is really small, then we interleave to reduce the loop 5658 // overhead. 5659 // 3. We don't interleave if we think that we will spill registers to memory 5660 // due to the increased register pressure. 5661 5662 if (!isScalarEpilogueAllowed()) 5663 return 1; 5664 5665 // We used the distance for the interleave count. 5666 if (Legal->getMaxSafeDepDistBytes() != -1U) 5667 return 1; 5668 5669 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5670 const bool HasReductions = !Legal->getReductionVars().empty(); 5671 // Do not interleave loops with a relatively small known or estimated trip 5672 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 5673 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5674 // because with the above conditions interleaving can expose ILP and break 5675 // cross iteration dependences for reductions. 5676 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5677 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5678 return 1; 5679 5680 // If we did not calculate the cost for VF (because the user selected the VF) 5681 // then we calculate the cost of VF here. 5682 if (LoopCost == 0) { 5683 InstructionCost C = expectedCost(VF).first; 5684 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5685 LoopCost = *C.getValue(); 5686 5687 // Loop body is free and there is no need for interleaving. 5688 if (LoopCost == 0) 5689 return 1; 5690 } 5691 5692 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5693 // We divide by these constants so assume that we have at least one 5694 // instruction that uses at least one register. 5695 for (auto& pair : R.MaxLocalUsers) { 5696 pair.second = std::max(pair.second, 1U); 5697 } 5698 5699 // We calculate the interleave count using the following formula. 5700 // Subtract the number of loop invariants from the number of available 5701 // registers. These registers are used by all of the interleaved instances. 5702 // Next, divide the remaining registers by the number of registers that is 5703 // required by the loop, in order to estimate how many parallel instances 5704 // fit without causing spills. All of this is rounded down if necessary to be 5705 // a power of two. We want power of two interleave count to simplify any 5706 // addressing operations or alignment considerations. 
5707 // We also want power of two interleave counts to ensure that the induction 5708 // variable of the vector loop wraps to zero, when tail is folded by masking; 5709 // this currently happens when OptForSize, in which case IC is set to 1 above. 5710 unsigned IC = UINT_MAX; 5711 5712 for (auto& pair : R.MaxLocalUsers) { 5713 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5714 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5715 << " registers of " 5716 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5717 if (VF.isScalar()) { 5718 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5719 TargetNumRegisters = ForceTargetNumScalarRegs; 5720 } else { 5721 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5722 TargetNumRegisters = ForceTargetNumVectorRegs; 5723 } 5724 unsigned MaxLocalUsers = pair.second; 5725 unsigned LoopInvariantRegs = 0; 5726 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5727 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5728 5729 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5730 // Don't count the induction variable as interleaved. 5731 if (EnableIndVarRegisterHeur) { 5732 TmpIC = 5733 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5734 std::max(1U, (MaxLocalUsers - 1))); 5735 } 5736 5737 IC = std::min(IC, TmpIC); 5738 } 5739 5740 // Clamp the interleave ranges to reasonable counts. 5741 unsigned MaxInterleaveCount = 5742 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5743 5744 // Check if the user has overridden the max. 5745 if (VF.isScalar()) { 5746 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5747 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5748 } else { 5749 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5750 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5751 } 5752 5753 // If trip count is known or estimated compile time constant, limit the 5754 // interleave count to be less than the trip count divided by VF, provided it 5755 // is at least 1. 5756 // 5757 // For scalable vectors we can't know if interleaving is beneficial. It may 5758 // not be beneficial for small loops if none of the lanes in the second vector 5759 // iterations is enabled. However, for larger loops, there is likely to be a 5760 // similar benefit as for fixed-width vectors. For now, we choose to leave 5761 // the InterleaveCount as if vscale is '1', although if some information about 5762 // the vector is known (e.g. min vector size), we can make a better decision. 5763 if (BestKnownTC) { 5764 MaxInterleaveCount = 5765 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5766 // Make sure MaxInterleaveCount is greater than 0. 5767 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5768 } 5769 5770 assert(MaxInterleaveCount > 0 && 5771 "Maximum interleave count must be greater than 0"); 5772 5773 // Clamp the calculated IC to be between the 1 and the max interleave count 5774 // that the target and trip count allows. 5775 if (IC > MaxInterleaveCount) 5776 IC = MaxInterleaveCount; 5777 else 5778 // Make sure IC is greater than 0. 5779 IC = std::max(1u, IC); 5780 5781 assert(IC > 0 && "Interleave count must be greater than 0."); 5782 5783 // Interleave if we vectorized this loop and there is a reduction that could 5784 // benefit from interleaving. 
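  // As a concrete (illustrative) example: interleaving a summation with
  // IC == 2 keeps two independent partial accumulators that are only combined
  // after the loop, roughly halving the length of the loop-carried dependence
  // chain; that is the main reason reductions are treated specially here.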
5785 if (VF.isVector() && HasReductions) { 5786 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5787 return IC; 5788 } 5789 5790 // For any scalar loop that either requires runtime checks or predication we 5791 // are better off leaving this to the unroller. Note that if we've already 5792 // vectorized the loop we will have done the runtime check and so interleaving 5793 // won't require further checks. 5794 bool ScalarInterleavingRequiresPredication = 5795 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5796 return Legal->blockNeedsPredication(BB); 5797 })); 5798 bool ScalarInterleavingRequiresRuntimePointerCheck = 5799 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5800 5801 // We want to interleave small loops in order to reduce the loop overhead and 5802 // potentially expose ILP opportunities. 5803 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5804 << "LV: IC is " << IC << '\n' 5805 << "LV: VF is " << VF << '\n'); 5806 const bool AggressivelyInterleaveReductions = 5807 TTI.enableAggressiveInterleaving(HasReductions); 5808 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5809 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5810 // We assume that the cost overhead is 1 and we use the cost model 5811 // to estimate the cost of the loop and interleave until the cost of the 5812 // loop overhead is about 5% of the cost of the loop. 5813 unsigned SmallIC = 5814 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5815 5816 // Interleave until store/load ports (estimated by max interleave count) are 5817 // saturated. 5818 unsigned NumStores = Legal->getNumStores(); 5819 unsigned NumLoads = Legal->getNumLoads(); 5820 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5821 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5822 5823 // There is little point in interleaving for reductions containing selects 5824 // and compares when VF=1 since it may just create more overhead than it's 5825 // worth for loops with small trip counts. This is because we still have to 5826 // do the final reduction after the loop. 5827 bool HasSelectCmpReductions = 5828 HasReductions && 5829 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5830 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5831 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5832 RdxDesc.getRecurrenceKind()); 5833 }); 5834 if (HasSelectCmpReductions) { 5835 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5836 return 1; 5837 } 5838 5839 // If we have a scalar reduction (vector reductions are already dealt with 5840 // by this point), we can increase the critical path length if the loop 5841 // we're interleaving is inside another loop. For tree-wise reductions 5842 // set the limit to 2, and for ordered reductions it's best to disable 5843 // interleaving entirely. 
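    // For example (illustrative, not target specific): a scalar dot-product
    // loop nested in an outer loop gains one extra partial accumulator per
    // interleaved copy, and those accumulators must be combined again on
    // every outer-loop iteration; capping the count below keeps that extra
    // reduction work bounded.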
5844     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5845       bool HasOrderedReductions =
5846           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5847             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5848             return RdxDesc.isOrdered();
5849           });
5850       if (HasOrderedReductions) {
5851         LLVM_DEBUG(
5852             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5853         return 1;
5854       }
5855 
5856       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5857       SmallIC = std::min(SmallIC, F);
5858       StoresIC = std::min(StoresIC, F);
5859       LoadsIC = std::min(LoadsIC, F);
5860     }
5861 
5862     if (EnableLoadStoreRuntimeInterleave &&
5863         std::max(StoresIC, LoadsIC) > SmallIC) {
5864       LLVM_DEBUG(
5865           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5866       return std::max(StoresIC, LoadsIC);
5867     }
5868 
5869     // If there are scalar reductions and TTI has enabled aggressive
5870     // interleaving for reductions, we will interleave to expose ILP.
5871     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5872         AggressivelyInterleaveReductions) {
5873       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5874       // Interleave no less than SmallIC but not as aggressive as the normal IC
5875       // to satisfy the rare situation when resources are too limited.
5876       return std::max(IC / 2, SmallIC);
5877     } else {
5878       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5879       return SmallIC;
5880     }
5881   }
5882 
5883   // Interleave if this is a large loop (small loops are already dealt with by
5884   // this point) that could benefit from interleaving.
5885   if (AggressivelyInterleaveReductions) {
5886     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5887     return IC;
5888   }
5889 
5890   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5891   return 1;
5892 }
5893 
5894 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5895 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5896   // This function calculates the register usage by measuring the highest number
5897   // of values that are alive at a single location. Obviously, this is a very
5898   // rough estimation. We scan the loop in a topological order and
5899   // assign a number to each instruction. We use RPO to ensure that defs are
5900   // met before their users. We assume that each instruction that has in-loop
5901   // users starts an interval. We record every time that an in-loop value is
5902   // used, so we have a list of the first and last occurrences of each
5903   // instruction. Next, we transpose this data structure into a multi map that
5904   // holds the list of intervals that *end* at a specific location. This multi
5905   // map allows us to perform a linear search. We scan the instructions linearly
5906   // and record each time that a new interval starts, by placing it in a set.
5907   // If we find this value in the multi-map then we remove it from the set.
5908   // The max register usage is the maximum size of the set.
5909   // We also search for instructions that are defined outside the loop, but are
5910   // used inside the loop. We need this number separately from the max-interval
5911   // usage number because when we unroll, loop-invariant values do not take
5912   // more registers.
5913   LoopBlocksDFS DFS(TheLoop);
5914   DFS.perform(LI);
5915 
5916   RegisterUsage RU;
5917 
5918   // Each 'key' in the map opens a new interval. The values
5919   // of the map are the index of the 'last seen' usage of the
5920   // instruction that is the key.
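  // Small worked example (illustrative only). For a body such as
  //   %a = load ...   ; idx 0
  //   %b = add %a, 1  ; idx 1
  //   %c = mul %a, %b ; idx 2
  //   store %c, ...   ; idx 3
  // the intervals for %a and %b both end at the mul (their last use), so just
  // before the mul the set of open intervals is {%a, %b} and the estimated
  // peak usage for the scalar VF is two registers (plus any loop-invariant
  // values, which are accounted for separately).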
5921 using IntervalMap = DenseMap<Instruction *, unsigned>; 5922 5923 // Maps instruction to its index. 5924 SmallVector<Instruction *, 64> IdxToInstr; 5925 // Marks the end of each interval. 5926 IntervalMap EndPoint; 5927 // Saves the list of instruction indices that are used in the loop. 5928 SmallPtrSet<Instruction *, 8> Ends; 5929 // Saves the list of values that are used in the loop but are 5930 // defined outside the loop, such as arguments and constants. 5931 SmallPtrSet<Value *, 8> LoopInvariants; 5932 5933 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5934 for (Instruction &I : BB->instructionsWithoutDebug()) { 5935 IdxToInstr.push_back(&I); 5936 5937 // Save the end location of each USE. 5938 for (Value *U : I.operands()) { 5939 auto *Instr = dyn_cast<Instruction>(U); 5940 5941 // Ignore non-instruction values such as arguments, constants, etc. 5942 if (!Instr) 5943 continue; 5944 5945 // If this instruction is outside the loop then record it and continue. 5946 if (!TheLoop->contains(Instr)) { 5947 LoopInvariants.insert(Instr); 5948 continue; 5949 } 5950 5951 // Overwrite previous end points. 5952 EndPoint[Instr] = IdxToInstr.size(); 5953 Ends.insert(Instr); 5954 } 5955 } 5956 } 5957 5958 // Saves the list of intervals that end with the index in 'key'. 5959 using InstrList = SmallVector<Instruction *, 2>; 5960 DenseMap<unsigned, InstrList> TransposeEnds; 5961 5962 // Transpose the EndPoints to a list of values that end at each index. 5963 for (auto &Interval : EndPoint) 5964 TransposeEnds[Interval.second].push_back(Interval.first); 5965 5966 SmallPtrSet<Instruction *, 8> OpenIntervals; 5967 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5968 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5969 5970 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5971 5972 // A lambda that gets the register usage for the given type and VF. 5973 const auto &TTICapture = TTI; 5974 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 5975 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5976 return 0; 5977 InstructionCost::CostType RegUsage = 5978 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 5979 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 5980 "Nonsensical values for register usage."); 5981 return RegUsage; 5982 }; 5983 5984 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5985 Instruction *I = IdxToInstr[i]; 5986 5987 // Remove all of the instructions that end at this location. 5988 InstrList &List = TransposeEnds[i]; 5989 for (Instruction *ToRemove : List) 5990 OpenIntervals.erase(ToRemove); 5991 5992 // Ignore instructions that are never used within the loop. 5993 if (!Ends.count(I)) 5994 continue; 5995 5996 // Skip ignored values. 5997 if (ValuesToIgnore.count(I)) 5998 continue; 5999 6000 // For each VF find the maximum usage of registers. 6001 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6002 // Count the number of live intervals. 6003 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6004 6005 if (VFs[j].isScalar()) { 6006 for (auto Inst : OpenIntervals) { 6007 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6008 if (RegUsage.find(ClassID) == RegUsage.end()) 6009 RegUsage[ClassID] = 1; 6010 else 6011 RegUsage[ClassID] += 1; 6012 } 6013 } else { 6014 collectUniformsAndScalars(VFs[j]); 6015 for (auto Inst : OpenIntervals) { 6016 // Skip ignored values for VF > 1. 
6017 if (VecValuesToIgnore.count(Inst)) 6018 continue; 6019 if (isScalarAfterVectorization(Inst, VFs[j])) { 6020 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6021 if (RegUsage.find(ClassID) == RegUsage.end()) 6022 RegUsage[ClassID] = 1; 6023 else 6024 RegUsage[ClassID] += 1; 6025 } else { 6026 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6027 if (RegUsage.find(ClassID) == RegUsage.end()) 6028 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6029 else 6030 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6031 } 6032 } 6033 } 6034 6035 for (auto& pair : RegUsage) { 6036 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6037 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6038 else 6039 MaxUsages[j][pair.first] = pair.second; 6040 } 6041 } 6042 6043 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6044 << OpenIntervals.size() << '\n'); 6045 6046 // Add the current instruction to the list of open intervals. 6047 OpenIntervals.insert(I); 6048 } 6049 6050 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6051 SmallMapVector<unsigned, unsigned, 4> Invariant; 6052 6053 for (auto Inst : LoopInvariants) { 6054 unsigned Usage = 6055 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6056 unsigned ClassID = 6057 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6058 if (Invariant.find(ClassID) == Invariant.end()) 6059 Invariant[ClassID] = Usage; 6060 else 6061 Invariant[ClassID] += Usage; 6062 } 6063 6064 LLVM_DEBUG({ 6065 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6066 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6067 << " item\n"; 6068 for (const auto &pair : MaxUsages[i]) { 6069 dbgs() << "LV(REG): RegisterClass: " 6070 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6071 << " registers\n"; 6072 } 6073 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6074 << " item\n"; 6075 for (const auto &pair : Invariant) { 6076 dbgs() << "LV(REG): RegisterClass: " 6077 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6078 << " registers\n"; 6079 } 6080 }); 6081 6082 RU.LoopInvariantRegs = Invariant; 6083 RU.MaxLocalUsers = MaxUsages[i]; 6084 RUs[i] = RU; 6085 } 6086 6087 return RUs; 6088 } 6089 6090 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6091 ElementCount VF) { 6092 // TODO: Cost model for emulated masked load/store is completely 6093 // broken. This hack guides the cost model to use an artificially 6094 // high enough value to practically disable vectorization with such 6095 // operations, except where previously deployed legality hack allowed 6096 // using very low cost values. This is to avoid regressions coming simply 6097 // from moving "masked load/store" check from legality to cost model. 6098 // Masked Load/Gather emulation was previously never allowed. 6099 // Limited number of Masked Store/Scatter emulation was allowed. 6100 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6101 return isa<LoadInst>(I) || 6102 (isa<StoreInst>(I) && 6103 NumPredStores > NumberOfStoresToPredicate); 6104 } 6105 6106 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6107 // If we aren't vectorizing the loop, or if we've already collected the 6108 // instructions to scalarize, there's nothing to do. 
Collection may already
6109   // have occurred if we have a user-selected VF and are now computing the
6110   // expected cost for interleaving.
6111   if (VF.isScalar() || VF.isZero() ||
6112       InstsToScalarize.find(VF) != InstsToScalarize.end())
6113     return;
6114 
6115   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6116   // not profitable to scalarize any instructions, the presence of VF in the
6117   // map will indicate that we've analyzed it already.
6118   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6119 
6120   // Find all the instructions that are scalar with predication in the loop and
6121   // determine if it would be better to not if-convert the blocks they are in.
6122   // If so, we also record the instructions to scalarize.
6123   for (BasicBlock *BB : TheLoop->blocks()) {
6124     if (!blockNeedsPredicationForAnyReason(BB))
6125       continue;
6126     for (Instruction &I : *BB)
6127       if (isScalarWithPredication(&I, VF)) {
6128         ScalarCostsTy ScalarCosts;
6129         // Do not apply discount if scalable, because that would lead to
6130         // invalid scalarization costs.
6131         // Do not apply discount logic if hacked cost is needed
6132         // for emulated masked memrefs.
6133         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6134             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6135           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6136         // Remember that BB will remain after vectorization.
6137         PredicatedBBsAfterVectorization.insert(BB);
6138       }
6139   }
6140 }
6141 
6142 int LoopVectorizationCostModel::computePredInstDiscount(
6143     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6144   assert(!isUniformAfterVectorization(PredInst, VF) &&
6145          "Instruction marked uniform-after-vectorization will be predicated");
6146 
6147   // Initialize the discount to zero, meaning that the scalar version and the
6148   // vector version cost the same.
6149   InstructionCost Discount = 0;
6150 
6151   // Holds instructions to analyze. The instructions we visit are mapped in
6152   // ScalarCosts. Those instructions are the ones that would be scalarized if
6153   // we find that the scalar version costs less.
6154   SmallVector<Instruction *, 8> Worklist;
6155 
6156   // Returns true if the given instruction can be scalarized.
6157   auto canBeScalarized = [&](Instruction *I) -> bool {
6158     // We only attempt to scalarize instructions forming a single-use chain
6159     // from the original predicated block that would otherwise be vectorized.
6160     // Although not strictly necessary, we give up on instructions we know will
6161     // already be scalar to avoid traversing chains that are unlikely to be
6162     // beneficial.
6163     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6164         isScalarAfterVectorization(I, VF))
6165       return false;
6166 
6167     // If the instruction is scalar with predication, it will be analyzed
6168     // separately. We ignore it within the context of PredInst.
6169     if (isScalarWithPredication(I, VF))
6170       return false;
6171 
6172     // If any of the instruction's operands are uniform after vectorization,
6173     // the instruction cannot be scalarized. This prevents, for example, a
6174     // masked load from being scalarized.
6175     //
6176     // We assume we will only emit a value for lane zero of an instruction
6177     // marked uniform after vectorization, rather than VF identical values.
6178     // Thus, if we scalarize an instruction that uses a uniform, we would
6179     // create uses of values corresponding to the lanes we aren't emitting code
6180     // for.
This behavior can be changed by allowing getScalarValue to clone 6181 // the lane zero values for uniforms rather than asserting. 6182 for (Use &U : I->operands()) 6183 if (auto *J = dyn_cast<Instruction>(U.get())) 6184 if (isUniformAfterVectorization(J, VF)) 6185 return false; 6186 6187 // Otherwise, we can scalarize the instruction. 6188 return true; 6189 }; 6190 6191 // Compute the expected cost discount from scalarizing the entire expression 6192 // feeding the predicated instruction. We currently only consider expressions 6193 // that are single-use instruction chains. 6194 Worklist.push_back(PredInst); 6195 while (!Worklist.empty()) { 6196 Instruction *I = Worklist.pop_back_val(); 6197 6198 // If we've already analyzed the instruction, there's nothing to do. 6199 if (ScalarCosts.find(I) != ScalarCosts.end()) 6200 continue; 6201 6202 // Compute the cost of the vector instruction. Note that this cost already 6203 // includes the scalarization overhead of the predicated instruction. 6204 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6205 6206 // Compute the cost of the scalarized instruction. This cost is the cost of 6207 // the instruction as if it wasn't if-converted and instead remained in the 6208 // predicated block. We will scale this cost by block probability after 6209 // computing the scalarization overhead. 6210 InstructionCost ScalarCost = 6211 VF.getFixedValue() * 6212 getInstructionCost(I, ElementCount::getFixed(1)).first; 6213 6214 // Compute the scalarization overhead of needed insertelement instructions 6215 // and phi nodes. 6216 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6217 ScalarCost += TTI.getScalarizationOverhead( 6218 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6219 APInt::getAllOnes(VF.getFixedValue()), true, false); 6220 ScalarCost += 6221 VF.getFixedValue() * 6222 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6223 } 6224 6225 // Compute the scalarization overhead of needed extractelement 6226 // instructions. For each of the instruction's operands, if the operand can 6227 // be scalarized, add it to the worklist; otherwise, account for the 6228 // overhead. 6229 for (Use &U : I->operands()) 6230 if (auto *J = dyn_cast<Instruction>(U.get())) { 6231 assert(VectorType::isValidElementType(J->getType()) && 6232 "Instruction has non-scalar type"); 6233 if (canBeScalarized(J)) 6234 Worklist.push_back(J); 6235 else if (needsExtract(J, VF)) { 6236 ScalarCost += TTI.getScalarizationOverhead( 6237 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6238 APInt::getAllOnes(VF.getFixedValue()), false, true); 6239 } 6240 } 6241 6242 // Scale the total scalar cost by block probability. 6243 ScalarCost /= getReciprocalPredBlockProb(); 6244 6245 // Compute the discount. A non-negative discount means the vector version 6246 // of the instruction costs more, and scalarizing would be beneficial. 6247 Discount += VectorCost - ScalarCost; 6248 ScalarCosts[I] = ScalarCost; 6249 } 6250 6251 return *Discount.getValue(); 6252 } 6253 6254 LoopVectorizationCostModel::VectorizationCostTy 6255 LoopVectorizationCostModel::expectedCost( 6256 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6257 VectorizationCostTy Cost; 6258 6259 // For each block. 6260 for (BasicBlock *BB : TheLoop->blocks()) { 6261 VectorizationCostTy BlockCost; 6262 6263 // For each instruction in the old loop. 6264 for (Instruction &I : BB->instructionsWithoutDebug()) { 6265 // Skip ignored values. 
6266 if (ValuesToIgnore.count(&I) || 6267 (VF.isVector() && VecValuesToIgnore.count(&I))) 6268 continue; 6269 6270 VectorizationCostTy C = getInstructionCost(&I, VF); 6271 6272 // Check if we should override the cost. 6273 if (C.first.isValid() && 6274 ForceTargetInstructionCost.getNumOccurrences() > 0) 6275 C.first = InstructionCost(ForceTargetInstructionCost); 6276 6277 // Keep a list of instructions with invalid costs. 6278 if (Invalid && !C.first.isValid()) 6279 Invalid->emplace_back(&I, VF); 6280 6281 BlockCost.first += C.first; 6282 BlockCost.second |= C.second; 6283 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6284 << " for VF " << VF << " For instruction: " << I 6285 << '\n'); 6286 } 6287 6288 // If we are vectorizing a predicated block, it will have been 6289 // if-converted. This means that the block's instructions (aside from 6290 // stores and instructions that may divide by zero) will now be 6291 // unconditionally executed. For the scalar case, we may not always execute 6292 // the predicated block, if it is an if-else block. Thus, scale the block's 6293 // cost by the probability of executing it. blockNeedsPredication from 6294 // Legal is used so as to not include all blocks in tail folded loops. 6295 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6296 BlockCost.first /= getReciprocalPredBlockProb(); 6297 6298 Cost.first += BlockCost.first; 6299 Cost.second |= BlockCost.second; 6300 } 6301 6302 return Cost; 6303 } 6304 6305 /// Gets Address Access SCEV after verifying that the access pattern 6306 /// is loop invariant except the induction variable dependence. 6307 /// 6308 /// This SCEV can be sent to the Target in order to estimate the address 6309 /// calculation cost. 6310 static const SCEV *getAddressAccessSCEV( 6311 Value *Ptr, 6312 LoopVectorizationLegality *Legal, 6313 PredicatedScalarEvolution &PSE, 6314 const Loop *TheLoop) { 6315 6316 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6317 if (!Gep) 6318 return nullptr; 6319 6320 // We are looking for a gep with all loop invariant indices except for one 6321 // which should be an induction variable. 6322 auto SE = PSE.getSE(); 6323 unsigned NumOperands = Gep->getNumOperands(); 6324 for (unsigned i = 1; i < NumOperands; ++i) { 6325 Value *Opd = Gep->getOperand(i); 6326 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6327 !Legal->isInductionVariable(Opd)) 6328 return nullptr; 6329 } 6330 6331 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6332 return PSE.getSCEV(Ptr); 6333 } 6334 6335 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6336 return Legal->hasStride(I->getOperand(0)) || 6337 Legal->hasStride(I->getOperand(1)); 6338 } 6339 6340 InstructionCost 6341 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6342 ElementCount VF) { 6343 assert(VF.isVector() && 6344 "Scalarization cost of instruction implies vectorization."); 6345 if (VF.isScalable()) 6346 return InstructionCost::getInvalid(); 6347 6348 Type *ValTy = getLoadStoreType(I); 6349 auto SE = PSE.getSE(); 6350 6351 unsigned AS = getLoadStoreAddressSpace(I); 6352 Value *Ptr = getLoadStorePointerOperand(I); 6353 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6354 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6355 // that it is being called from this specific place. 
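  // Rough shape of the cost assembled below (a sketch, fixed-width VFs only):
  //   Cost = VF * (address computation + scalar memory op)
  //        + insert/extract overhead from scalarization
  // and, for predicated accesses, that sum is divided by
  // getReciprocalPredBlockProb() before the i1 extract and branch costs are
  // added on top.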
6356 6357 // Figure out whether the access is strided and get the stride value 6358 // if it's known in compile time 6359 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6360 6361 // Get the cost of the scalar memory instruction and address computation. 6362 InstructionCost Cost = 6363 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6364 6365 // Don't pass *I here, since it is scalar but will actually be part of a 6366 // vectorized loop where the user of it is a vectorized instruction. 6367 const Align Alignment = getLoadStoreAlignment(I); 6368 Cost += VF.getKnownMinValue() * 6369 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6370 AS, TTI::TCK_RecipThroughput); 6371 6372 // Get the overhead of the extractelement and insertelement instructions 6373 // we might create due to scalarization. 6374 Cost += getScalarizationOverhead(I, VF); 6375 6376 // If we have a predicated load/store, it will need extra i1 extracts and 6377 // conditional branches, but may not be executed for each vector lane. Scale 6378 // the cost by the probability of executing the predicated block. 6379 if (isPredicatedInst(I, VF)) { 6380 Cost /= getReciprocalPredBlockProb(); 6381 6382 // Add the cost of an i1 extract and a branch 6383 auto *Vec_i1Ty = 6384 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6385 Cost += TTI.getScalarizationOverhead( 6386 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6387 /*Insert=*/false, /*Extract=*/true); 6388 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6389 6390 if (useEmulatedMaskMemRefHack(I, VF)) 6391 // Artificially setting to a high enough value to practically disable 6392 // vectorization with such operations. 6393 Cost = 3000000; 6394 } 6395 6396 return Cost; 6397 } 6398 6399 InstructionCost 6400 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6401 ElementCount VF) { 6402 Type *ValTy = getLoadStoreType(I); 6403 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6404 Value *Ptr = getLoadStorePointerOperand(I); 6405 unsigned AS = getLoadStoreAddressSpace(I); 6406 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6407 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6408 6409 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6410 "Stride should be 1 or -1 for consecutive memory access"); 6411 const Align Alignment = getLoadStoreAlignment(I); 6412 InstructionCost Cost = 0; 6413 if (Legal->isMaskRequired(I)) 6414 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6415 CostKind); 6416 else 6417 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6418 CostKind, I); 6419 6420 bool Reverse = ConsecutiveStride < 0; 6421 if (Reverse) 6422 Cost += 6423 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6424 return Cost; 6425 } 6426 6427 InstructionCost 6428 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6429 ElementCount VF) { 6430 assert(Legal->isUniformMemOp(*I)); 6431 6432 Type *ValTy = getLoadStoreType(I); 6433 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6434 const Align Alignment = getLoadStoreAlignment(I); 6435 unsigned AS = getLoadStoreAddressSpace(I); 6436 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6437 if (isa<LoadInst>(I)) { 6438 return TTI.getAddressComputationCost(ValTy) + 6439 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6440 CostKind) + 6441 
TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6442 } 6443 StoreInst *SI = cast<StoreInst>(I); 6444 6445 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6446 return TTI.getAddressComputationCost(ValTy) + 6447 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6448 CostKind) + 6449 (isLoopInvariantStoreValue 6450 ? 0 6451 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6452 VF.getKnownMinValue() - 1)); 6453 } 6454 6455 InstructionCost 6456 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6457 ElementCount VF) { 6458 Type *ValTy = getLoadStoreType(I); 6459 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6460 const Align Alignment = getLoadStoreAlignment(I); 6461 const Value *Ptr = getLoadStorePointerOperand(I); 6462 6463 return TTI.getAddressComputationCost(VectorTy) + 6464 TTI.getGatherScatterOpCost( 6465 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6466 TargetTransformInfo::TCK_RecipThroughput, I); 6467 } 6468 6469 InstructionCost 6470 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6471 ElementCount VF) { 6472 // TODO: Once we have support for interleaving with scalable vectors 6473 // we can calculate the cost properly here. 6474 if (VF.isScalable()) 6475 return InstructionCost::getInvalid(); 6476 6477 Type *ValTy = getLoadStoreType(I); 6478 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6479 unsigned AS = getLoadStoreAddressSpace(I); 6480 6481 auto Group = getInterleavedAccessGroup(I); 6482 assert(Group && "Fail to get an interleaved access group."); 6483 6484 unsigned InterleaveFactor = Group->getFactor(); 6485 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6486 6487 // Holds the indices of existing members in the interleaved group. 6488 SmallVector<unsigned, 4> Indices; 6489 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6490 if (Group->getMember(IF)) 6491 Indices.push_back(IF); 6492 6493 // Calculate the cost of the whole interleaved group. 6494 bool UseMaskForGaps = 6495 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6496 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6497 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6498 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6499 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6500 6501 if (Group->isReverse()) { 6502 // TODO: Add support for reversed masked interleaved access. 6503 assert(!Legal->isMaskRequired(I) && 6504 "Reverse masked interleaved access not supported."); 6505 Cost += 6506 Group->getNumMembers() * 6507 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6508 } 6509 return Cost; 6510 } 6511 6512 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6513 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6514 using namespace llvm::PatternMatch; 6515 // Early exit for no inloop reductions 6516 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6517 return None; 6518 auto *VectorTy = cast<VectorType>(Ty); 6519 6520 // We are looking for a pattern of, and finding the minimal acceptable cost: 6521 // reduce(mul(ext(A), ext(B))) or 6522 // reduce(mul(A, B)) or 6523 // reduce(ext(A)) or 6524 // reduce(A). 6525 // The basic idea is that we walk down the tree to do that, finding the root 6526 // reduction instruction in InLoopReductionImmediateChains. 
From there we find
6527   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6528   // of the components. If the reduction cost is lower, then we return it for the
6529   // reduction instruction and 0 for the other instructions in the pattern. If
6530   // it is not, we return an invalid cost specifying the original cost method
6531   // should be used.
6532   Instruction *RetI = I;
6533   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6534     if (!RetI->hasOneUser())
6535       return None;
6536     RetI = RetI->user_back();
6537   }
6538   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6539       RetI->user_back()->getOpcode() == Instruction::Add) {
6540     if (!RetI->hasOneUser())
6541       return None;
6542     RetI = RetI->user_back();
6543   }
6544 
6545   // Test if the found instruction is a reduction, and if not return an invalid
6546   // cost specifying the parent to use the original cost modelling.
6547   if (!InLoopReductionImmediateChains.count(RetI))
6548     return None;
6549 
6550   // Find the reduction this chain is a part of and calculate the basic cost of
6551   // the reduction on its own.
6552   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6553   Instruction *ReductionPhi = LastChain;
6554   while (!isa<PHINode>(ReductionPhi))
6555     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6556 
6557   const RecurrenceDescriptor &RdxDesc =
6558       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6559 
6560   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6561       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6562 
6563   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6564   // normal fmul instruction to the cost of the fadd reduction.
6565   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6566     BaseCost +=
6567         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6568 
6569   // If we're using ordered reductions then we can just return the base cost
6570   // here, since getArithmeticReductionCost calculates the full ordered
6571   // reduction cost when FP reassociation is not allowed.
6572   if (useOrderedReductions(RdxDesc))
6573     return BaseCost;
6574 
6575   // Get the operand that was not the reduction chain and match it to one of the
6576   // patterns, returning the better cost if it is found.
6577   Instruction *RedOp = RetI->getOperand(1) == LastChain
6578                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6579                            : dyn_cast<Instruction>(RetI->getOperand(1));
6580 
6581   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6582 
6583   Instruction *Op0, *Op1;
6584   if (RedOp &&
6585       match(RedOp,
6586             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6587       match(Op0, m_ZExtOrSExt(m_Value())) &&
6588       Op0->getOpcode() == Op1->getOpcode() &&
6589       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6590       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6591       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6592 
6593     // Matched reduce(ext(mul(ext(A), ext(B))))
6594     // Note that the extend opcodes need to all match, or if A==B they will have
6595     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6596     // which is equally fine.
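    // Illustrative numbers only (not from any real TTI): if ExtCost = 1,
    // MulCost = 1, Ext2Cost = 1 and BaseCost = 2, the plain cost of the
    // pattern is 2 * 1 + 1 + 1 + 2 = 6, so an extended multiply-accumulate
    // reduction with RedCost = 2 would win the comparison below and be
    // returned for the reduction instruction (with 0 for the others).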
6597 bool IsUnsigned = isa<ZExtInst>(Op0); 6598 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6599 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6600 6601 InstructionCost ExtCost = 6602 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6603 TTI::CastContextHint::None, CostKind, Op0); 6604 InstructionCost MulCost = 6605 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6606 InstructionCost Ext2Cost = 6607 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6608 TTI::CastContextHint::None, CostKind, RedOp); 6609 6610 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6611 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6612 CostKind); 6613 6614 if (RedCost.isValid() && 6615 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6616 return I == RetI ? RedCost : 0; 6617 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6618 !TheLoop->isLoopInvariant(RedOp)) { 6619 // Matched reduce(ext(A)) 6620 bool IsUnsigned = isa<ZExtInst>(RedOp); 6621 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6622 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6623 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6624 CostKind); 6625 6626 InstructionCost ExtCost = 6627 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6628 TTI::CastContextHint::None, CostKind, RedOp); 6629 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6630 return I == RetI ? RedCost : 0; 6631 } else if (RedOp && 6632 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6633 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6634 Op0->getOpcode() == Op1->getOpcode() && 6635 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6636 bool IsUnsigned = isa<ZExtInst>(Op0); 6637 Type *Op0Ty = Op0->getOperand(0)->getType(); 6638 Type *Op1Ty = Op1->getOperand(0)->getType(); 6639 Type *LargestOpTy = 6640 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6641 : Op0Ty; 6642 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6643 6644 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6645 // different sizes. We take the largest type as the ext to reduce, and add 6646 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6647 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6648 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6649 TTI::CastContextHint::None, CostKind, Op0); 6650 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6651 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6652 TTI::CastContextHint::None, CostKind, Op1); 6653 InstructionCost MulCost = 6654 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6655 6656 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6657 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6658 CostKind); 6659 InstructionCost ExtraExtCost = 0; 6660 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6661 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6662 ExtraExtCost = TTI.getCastInstrCost( 6663 ExtraExtOp->getOpcode(), ExtType, 6664 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6665 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6666 } 6667 6668 if (RedCost.isValid() && 6669 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6670 return I == RetI ? 
RedCost : 0; 6671 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6672 // Matched reduce(mul()) 6673 InstructionCost MulCost = 6674 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6675 6676 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6677 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6678 CostKind); 6679 6680 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6681 return I == RetI ? RedCost : 0; 6682 } 6683 } 6684 6685 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6686 } 6687 6688 InstructionCost 6689 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6690 ElementCount VF) { 6691 // Calculate scalar cost only. Vectorization cost should be ready at this 6692 // moment. 6693 if (VF.isScalar()) { 6694 Type *ValTy = getLoadStoreType(I); 6695 const Align Alignment = getLoadStoreAlignment(I); 6696 unsigned AS = getLoadStoreAddressSpace(I); 6697 6698 return TTI.getAddressComputationCost(ValTy) + 6699 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6700 TTI::TCK_RecipThroughput, I); 6701 } 6702 return getWideningCost(I, VF); 6703 } 6704 6705 LoopVectorizationCostModel::VectorizationCostTy 6706 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6707 ElementCount VF) { 6708 // If we know that this instruction will remain uniform, check the cost of 6709 // the scalar version. 6710 if (isUniformAfterVectorization(I, VF)) 6711 VF = ElementCount::getFixed(1); 6712 6713 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6714 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6715 6716 // Forced scalars do not have any scalarization overhead. 6717 auto ForcedScalar = ForcedScalars.find(VF); 6718 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6719 auto InstSet = ForcedScalar->second; 6720 if (InstSet.count(I)) 6721 return VectorizationCostTy( 6722 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6723 VF.getKnownMinValue()), 6724 false); 6725 } 6726 6727 Type *VectorTy; 6728 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6729 6730 bool TypeNotScalarized = false; 6731 if (VF.isVector() && VectorTy->isVectorTy()) { 6732 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 6733 if (NumParts) 6734 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6735 else 6736 C = InstructionCost::getInvalid(); 6737 } 6738 return VectorizationCostTy(C, TypeNotScalarized); 6739 } 6740 6741 InstructionCost 6742 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6743 ElementCount VF) const { 6744 6745 // There is no mechanism yet to create a scalable scalarization loop, 6746 // so this is currently Invalid. 6747 if (VF.isScalable()) 6748 return InstructionCost::getInvalid(); 6749 6750 if (VF.isScalar()) 6751 return 0; 6752 6753 InstructionCost Cost = 0; 6754 Type *RetTy = ToVectorTy(I->getType(), VF); 6755 if (!RetTy->isVoidTy() && 6756 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6757 Cost += TTI.getScalarizationOverhead( 6758 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6759 false); 6760 6761 // Some targets keep addresses scalar. 6762 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6763 return Cost; 6764 6765 // Some targets support efficient element stores. 6766 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6767 return Cost; 6768 6769 // Collect operands to consider. 6770 CallInst *CI = dyn_cast<CallInst>(I); 6771 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 6772 6773 // Skip operands that do not require extraction/scalarization and do not incur 6774 // any overhead. 6775 SmallVector<Type *> Tys; 6776 for (auto *V : filterExtractingOperands(Ops, VF)) 6777 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6778 return Cost + TTI.getOperandsScalarizationOverhead( 6779 filterExtractingOperands(Ops, VF), Tys); 6780 } 6781 6782 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6783 if (VF.isScalar()) 6784 return; 6785 NumPredStores = 0; 6786 for (BasicBlock *BB : TheLoop->blocks()) { 6787 // For each instruction in the old loop. 6788 for (Instruction &I : *BB) { 6789 Value *Ptr = getLoadStorePointerOperand(&I); 6790 if (!Ptr) 6791 continue; 6792 6793 // TODO: We should generate better code and update the cost model for 6794 // predicated uniform stores. Today they are treated as any other 6795 // predicated store (see added test cases in 6796 // invariant-store-vectorization.ll). 6797 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6798 NumPredStores++; 6799 6800 if (Legal->isUniformMemOp(I)) { 6801 // TODO: Avoid replicating loads and stores instead of 6802 // relying on instcombine to remove them. 6803 // Load: Scalar load + broadcast 6804 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6805 InstructionCost Cost; 6806 if (isa<StoreInst>(&I) && VF.isScalable() && 6807 isLegalGatherOrScatter(&I, VF)) { 6808 Cost = getGatherScatterCost(&I, VF); 6809 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6810 } else { 6811 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 6812 "Cannot yet scalarize uniform stores"); 6813 Cost = getUniformMemOpCost(&I, VF); 6814 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6815 } 6816 continue; 6817 } 6818 6819 // We assume that widening is the best solution when possible. 6820 if (memoryInstructionCanBeWidened(&I, VF)) { 6821 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6822 int ConsecutiveStride = Legal->isConsecutivePtr( 6823 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6824 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6825 "Expected consecutive stride."); 6826 InstWidening Decision = 6827 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6828 setWideningDecision(&I, VF, Decision, Cost); 6829 continue; 6830 } 6831 6832 // Choose between Interleaving, Gather/Scatter or Scalarization. 6833 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6834 unsigned NumAccesses = 1; 6835 if (isAccessInterleaved(&I)) { 6836 auto Group = getInterleavedAccessGroup(&I); 6837 assert(Group && "Fail to get an interleaved access group."); 6838 6839 // Make one decision for the whole group. 6840 if (getWideningDecision(&I, VF) != CM_Unknown) 6841 continue; 6842 6843 NumAccesses = Group->getNumMembers(); 6844 if (interleavedAccessCanBeWidened(&I, VF)) 6845 InterleaveCost = getInterleaveGroupCost(&I, VF); 6846 } 6847 6848 InstructionCost GatherScatterCost = 6849 isLegalGatherOrScatter(&I, VF) 6850 ? getGatherScatterCost(&I, VF) * NumAccesses 6851 : InstructionCost::getInvalid(); 6852 6853 InstructionCost ScalarizationCost = 6854 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6855 6856 // Choose better solution for the current VF, 6857 // write down this decision and use it during vectorization. 
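      // For example (made-up costs): with InterleaveCost = 8,
      // GatherScatterCost = 12 and ScalarizationCost = 20 the access is given
      // CM_Interleave at cost 8; if interleaving were not possible, the
      // gather/scatter cost of 12 would still beat scalarization at 20.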
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void) hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // it means we don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Not possible to scalarize scalable vector with predicated instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return cost for branches around scalarized and predicated blocks.
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (
          TTI.getScalarizationOverhead(
              Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
          (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
7017 return 0; 7018 // Note: We currently assume zero cost for an unconditional branch inside 7019 // a predicated block since it will become a fall-through, although we 7020 // may decide in the future to call TTI for all branches. 7021 } 7022 case Instruction::PHI: { 7023 auto *Phi = cast<PHINode>(I); 7024 7025 // First-order recurrences are replaced by vector shuffles inside the loop. 7026 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7027 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7028 return TTI.getShuffleCost( 7029 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7030 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7031 7032 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7033 // converted into select instructions. We require N - 1 selects per phi 7034 // node, where N is the number of incoming values. 7035 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7036 return (Phi->getNumIncomingValues() - 1) * 7037 TTI.getCmpSelInstrCost( 7038 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7039 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7040 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7041 7042 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7043 } 7044 case Instruction::UDiv: 7045 case Instruction::SDiv: 7046 case Instruction::URem: 7047 case Instruction::SRem: 7048 // If we have a predicated instruction, it may not be executed for each 7049 // vector lane. Get the scalarization cost and scale this amount by the 7050 // probability of executing the predicated block. If the instruction is not 7051 // predicated, we fall through to the next case. 7052 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7053 InstructionCost Cost = 0; 7054 7055 // These instructions have a non-void type, so account for the phi nodes 7056 // that we will create. This cost is likely to be zero. The phi node 7057 // cost, if any, should be scaled by the block probability because it 7058 // models a copy at the end of each predicated block. 7059 Cost += VF.getKnownMinValue() * 7060 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7061 7062 // The cost of the non-predicated instruction. 7063 Cost += VF.getKnownMinValue() * 7064 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7065 7066 // The cost of insertelement and extractelement instructions needed for 7067 // scalarization. 7068 Cost += getScalarizationOverhead(I, VF); 7069 7070 // Scale the cost by the probability of executing the predicated blocks. 7071 // This assumes the predicated block for each vector lane is equally 7072 // likely. 7073 return Cost / getReciprocalPredBlockProb(); 7074 } 7075 LLVM_FALLTHROUGH; 7076 case Instruction::Add: 7077 case Instruction::FAdd: 7078 case Instruction::Sub: 7079 case Instruction::FSub: 7080 case Instruction::Mul: 7081 case Instruction::FMul: 7082 case Instruction::FDiv: 7083 case Instruction::FRem: 7084 case Instruction::Shl: 7085 case Instruction::LShr: 7086 case Instruction::AShr: 7087 case Instruction::And: 7088 case Instruction::Or: 7089 case Instruction::Xor: { 7090 // Since we will replace the stride by 1 the multiplication should go away. 
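    // E.g. when the loop has been versioned on a symbolic stride being 1, an
    // address computation like 'i * Stride' simplifies to 'i', so the
    // multiply has no cost in the vectorized loop.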
7091 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7092 return 0; 7093 7094 // Detect reduction patterns 7095 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7096 return *RedCost; 7097 7098 // Certain instructions can be cheaper to vectorize if they have a constant 7099 // second vector operand. One example of this are shifts on x86. 7100 Value *Op2 = I->getOperand(1); 7101 TargetTransformInfo::OperandValueProperties Op2VP; 7102 TargetTransformInfo::OperandValueKind Op2VK = 7103 TTI.getOperandInfo(Op2, Op2VP); 7104 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7105 Op2VK = TargetTransformInfo::OK_UniformValue; 7106 7107 SmallVector<const Value *, 4> Operands(I->operand_values()); 7108 return TTI.getArithmeticInstrCost( 7109 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7110 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7111 } 7112 case Instruction::FNeg: { 7113 return TTI.getArithmeticInstrCost( 7114 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7115 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7116 TargetTransformInfo::OP_None, I->getOperand(0), I); 7117 } 7118 case Instruction::Select: { 7119 SelectInst *SI = cast<SelectInst>(I); 7120 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7121 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7122 7123 const Value *Op0, *Op1; 7124 using namespace llvm::PatternMatch; 7125 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7126 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7127 // select x, y, false --> x & y 7128 // select x, true, y --> x | y 7129 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7130 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7131 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7132 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7133 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7134 Op1->getType()->getScalarSizeInBits() == 1); 7135 7136 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7137 return TTI.getArithmeticInstrCost( 7138 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7139 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7140 } 7141 7142 Type *CondTy = SI->getCondition()->getType(); 7143 if (!ScalarCond) 7144 CondTy = VectorType::get(CondTy, VF); 7145 7146 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7147 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7148 Pred = Cmp->getPredicate(); 7149 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7150 CostKind, I); 7151 } 7152 case Instruction::ICmp: 7153 case Instruction::FCmp: { 7154 Type *ValTy = I->getOperand(0)->getType(); 7155 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7156 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7157 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7158 VectorTy = ToVectorTy(ValTy, VF); 7159 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7160 cast<CmpInst>(I)->getPredicate(), CostKind, 7161 I); 7162 } 7163 case Instruction::Store: 7164 case Instruction::Load: { 7165 ElementCount Width = VF; 7166 if (Width.isVector()) { 7167 InstWidening Decision = getWideningDecision(I, Width); 7168 assert(Decision != CM_Unknown && 7169 "CM decision should be taken at this point"); 7170 if (Decision == CM_Scalarize) 7171 Width = ElementCount::getFixed(1); 7172 } 7173 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7174 return getMemoryInstructionCost(I, VF); 7175 } 7176 case Instruction::BitCast: 7177 if (I->getType()->isPointerTy()) 7178 return 0; 7179 LLVM_FALLTHROUGH; 7180 case Instruction::ZExt: 7181 case Instruction::SExt: 7182 case Instruction::FPToUI: 7183 case Instruction::FPToSI: 7184 case Instruction::FPExt: 7185 case Instruction::PtrToInt: 7186 case Instruction::IntToPtr: 7187 case Instruction::SIToFP: 7188 case Instruction::UIToFP: 7189 case Instruction::Trunc: 7190 case Instruction::FPTrunc: { 7191 // Computes the CastContextHint from a Load/Store instruction. 7192 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7193 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7194 "Expected a load or a store!"); 7195 7196 if (VF.isScalar() || !TheLoop->contains(I)) 7197 return TTI::CastContextHint::Normal; 7198 7199 switch (getWideningDecision(I, VF)) { 7200 case LoopVectorizationCostModel::CM_GatherScatter: 7201 return TTI::CastContextHint::GatherScatter; 7202 case LoopVectorizationCostModel::CM_Interleave: 7203 return TTI::CastContextHint::Interleave; 7204 case LoopVectorizationCostModel::CM_Scalarize: 7205 case LoopVectorizationCostModel::CM_Widen: 7206 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7207 : TTI::CastContextHint::Normal; 7208 case LoopVectorizationCostModel::CM_Widen_Reverse: 7209 return TTI::CastContextHint::Reversed; 7210 case LoopVectorizationCostModel::CM_Unknown: 7211 llvm_unreachable("Instr did not go through cost modelling?"); 7212 } 7213 7214 llvm_unreachable("Unhandled case!"); 7215 }; 7216 7217 unsigned Opcode = I->getOpcode(); 7218 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7219 // For Trunc, the context is the only user, which must be a StoreInst. 7220 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7221 if (I->hasOneUse()) 7222 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7223 CCH = ComputeCCH(Store); 7224 } 7225 // For Z/Sext, the context is the operand, which must be a LoadInst. 
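    // FPExt takes the same path: its context is likewise derived from a load
    // operand, if there is one.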
7226 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7227 Opcode == Instruction::FPExt) { 7228 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7229 CCH = ComputeCCH(Load); 7230 } 7231 7232 // We optimize the truncation of induction variables having constant 7233 // integer steps. The cost of these truncations is the same as the scalar 7234 // operation. 7235 if (isOptimizableIVTruncate(I, VF)) { 7236 auto *Trunc = cast<TruncInst>(I); 7237 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7238 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7239 } 7240 7241 // Detect reduction patterns 7242 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7243 return *RedCost; 7244 7245 Type *SrcScalarTy = I->getOperand(0)->getType(); 7246 Type *SrcVecTy = 7247 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7248 if (canTruncateToMinimalBitwidth(I, VF)) { 7249 // This cast is going to be shrunk. This may remove the cast or it might 7250 // turn it into slightly different cast. For example, if MinBW == 16, 7251 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7252 // 7253 // Calculate the modified src and dest types. 7254 Type *MinVecTy = VectorTy; 7255 if (Opcode == Instruction::Trunc) { 7256 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7257 VectorTy = 7258 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7259 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7260 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7261 VectorTy = 7262 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7263 } 7264 } 7265 7266 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7267 } 7268 case Instruction::Call: { 7269 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7270 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7271 return *RedCost; 7272 bool NeedToScalarize; 7273 CallInst *CI = cast<CallInst>(I); 7274 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7275 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7276 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7277 return std::min(CallCost, IntrinsicCost); 7278 } 7279 return CallCost; 7280 } 7281 case Instruction::ExtractValue: 7282 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7283 case Instruction::Alloca: 7284 // We cannot easily widen alloca to a scalable alloca, as 7285 // the result would need to be a vector of pointers. 7286 if (VF.isScalable()) 7287 return InstructionCost::getInvalid(); 7288 LLVM_FALLTHROUGH; 7289 default: 7290 // This opcode is unknown. Assume that it is the same as 'mul'. 7291 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7292 } // end of switch. 
7293 } 7294 7295 char LoopVectorize::ID = 0; 7296 7297 static const char lv_name[] = "Loop Vectorization"; 7298 7299 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7300 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7301 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7302 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7303 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7304 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7305 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7306 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7307 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7308 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7309 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7310 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7311 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7312 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7313 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7314 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7315 7316 namespace llvm { 7317 7318 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7319 7320 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7321 bool VectorizeOnlyWhenForced) { 7322 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7323 } 7324 7325 } // end namespace llvm 7326 7327 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7328 // Check if the pointer operand of a load or store instruction is 7329 // consecutive. 7330 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7331 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7332 return false; 7333 } 7334 7335 void LoopVectorizationCostModel::collectValuesToIgnore() { 7336 // Ignore ephemeral values. 7337 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7338 7339 // Ignore type-promoting instructions we identified during reduction 7340 // detection. 7341 for (auto &Reduction : Legal->getReductionVars()) { 7342 const RecurrenceDescriptor &RedDes = Reduction.second; 7343 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7344 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7345 } 7346 // Ignore type-casting instructions we identified during induction 7347 // detection. 7348 for (auto &Induction : Legal->getInductionVars()) { 7349 const InductionDescriptor &IndDes = Induction.second; 7350 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7351 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7352 } 7353 } 7354 7355 void LoopVectorizationCostModel::collectInLoopReductions() { 7356 for (auto &Reduction : Legal->getReductionVars()) { 7357 PHINode *Phi = Reduction.first; 7358 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7359 7360 // We don't collect reductions that are type promoted (yet). 7361 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7362 continue; 7363 7364 // If the target would prefer this reduction to happen "in-loop", then we 7365 // want to record it as such. 7366 unsigned Opcode = RdxDesc.getOpcode(); 7367 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7368 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7369 TargetTransformInfo::ReductionFlags())) 7370 continue; 7371 7372 // Check that we can correctly put the reductions into the loop, by 7373 // finding the chain of operations that leads from the phi to the loop 7374 // exit value. 
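    // If no single chain from the phi to the loop-exit value can be found
    // (e.g. the reduction value feeds several independent operations), the
    // reduction is left as an out-of-loop reduction.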
7375 SmallVector<Instruction *, 4> ReductionOperations = 7376 RdxDesc.getReductionOpChain(Phi, TheLoop); 7377 bool InLoop = !ReductionOperations.empty(); 7378 if (InLoop) { 7379 InLoopReductionChains[Phi] = ReductionOperations; 7380 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7381 Instruction *LastChain = Phi; 7382 for (auto *I : ReductionOperations) { 7383 InLoopReductionImmediateChains[I] = LastChain; 7384 LastChain = I; 7385 } 7386 } 7387 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7388 << " reduction for phi: " << *Phi << "\n"); 7389 } 7390 } 7391 7392 // TODO: we could return a pair of values that specify the max VF and 7393 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7394 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7395 // doesn't have a cost model that can choose which plan to execute if 7396 // more than one is generated. 7397 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7398 LoopVectorizationCostModel &CM) { 7399 unsigned WidestType; 7400 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7401 return WidestVectorRegBits / WidestType; 7402 } 7403 7404 VectorizationFactor 7405 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7406 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7407 ElementCount VF = UserVF; 7408 // Outer loop handling: They may require CFG and instruction level 7409 // transformations before even evaluating whether vectorization is profitable. 7410 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7411 // the vectorization pipeline. 7412 if (!OrigLoop->isInnermost()) { 7413 // If the user doesn't provide a vectorization factor, determine a 7414 // reasonable one. 7415 if (UserVF.isZero()) { 7416 VF = ElementCount::getFixed(determineVPlanVF( 7417 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7418 .getFixedSize(), 7419 CM)); 7420 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7421 7422 // Make sure we have a VF > 1 for stress testing. 7423 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7424 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7425 << "overriding computed VF.\n"); 7426 VF = ElementCount::getFixed(4); 7427 } 7428 } 7429 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7430 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7431 "VF needs to be a power of two"); 7432 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7433 << "VF " << VF << " to build VPlans.\n"); 7434 buildVPlans(VF, VF); 7435 7436 // For VPlan build stress testing, we bail out after VPlan construction. 7437 if (VPlanBuildStressTest) 7438 return VectorizationFactor::Disabled(); 7439 7440 return {VF, 0 /*Cost*/}; 7441 } 7442 7443 LLVM_DEBUG( 7444 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7445 "VPlan-native path.\n"); 7446 return VectorizationFactor::Disabled(); 7447 } 7448 7449 Optional<VectorizationFactor> 7450 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7451 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7452 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7453 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7454 return None; 7455 7456 // Invalidate interleave groups if all blocks of loop will be predicated. 
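  // If the header itself needs predication (i.e. the tail will be folded by
  // masking), every interleave-group member would have to be masked, which is
  // only possible when the target supports masked-interleaved accesses.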
7457 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7458 !useMaskedInterleavedAccesses(*TTI)) { 7459 LLVM_DEBUG( 7460 dbgs() 7461 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7462 "which requires masked-interleaved support.\n"); 7463 if (CM.InterleaveInfo.invalidateGroups()) 7464 // Invalidating interleave groups also requires invalidating all decisions 7465 // based on them, which includes widening decisions and uniform and scalar 7466 // values. 7467 CM.invalidateCostModelingDecisions(); 7468 } 7469 7470 ElementCount MaxUserVF = 7471 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7472 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7473 if (!UserVF.isZero() && UserVFIsLegal) { 7474 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7475 "VF needs to be a power of two"); 7476 // Collect the instructions (and their associated costs) that will be more 7477 // profitable to scalarize. 7478 if (CM.selectUserVectorizationFactor(UserVF)) { 7479 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7480 CM.collectInLoopReductions(); 7481 buildVPlansWithVPRecipes(UserVF, UserVF); 7482 LLVM_DEBUG(printPlans(dbgs())); 7483 return {{UserVF, 0}}; 7484 } else 7485 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7486 "InvalidCost", ORE, OrigLoop); 7487 } 7488 7489 // Populate the set of Vectorization Factor Candidates. 7490 ElementCountSet VFCandidates; 7491 for (auto VF = ElementCount::getFixed(1); 7492 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7493 VFCandidates.insert(VF); 7494 for (auto VF = ElementCount::getScalable(1); 7495 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7496 VFCandidates.insert(VF); 7497 7498 for (const auto &VF : VFCandidates) { 7499 // Collect Uniform and Scalar instructions after vectorization with VF. 7500 CM.collectUniformsAndScalars(VF); 7501 7502 // Collect the instructions (and their associated costs) that will be more 7503 // profitable to scalarize. 7504 if (VF.isVector()) 7505 CM.collectInstsToScalarize(VF); 7506 } 7507 7508 CM.collectInLoopReductions(); 7509 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7510 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7511 7512 LLVM_DEBUG(printPlans(dbgs())); 7513 if (!MaxFactors.hasVector()) 7514 return VectorizationFactor::Disabled(); 7515 7516 // Select the optimal vectorization factor. 7517 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7518 7519 // Check if it is profitable to vectorize with runtime checks. 
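  // A large number of runtime pointer checks can easily outweigh the benefit
  // of vectorizing; give up if the pragma or default thresholds are exceeded
  // and the user did not explicitly allow reordering of memory operations.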
7520 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7521 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7522 bool PragmaThresholdReached = 7523 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7524 bool ThresholdReached = 7525 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7526 if ((ThresholdReached && !Hints.allowReordering()) || 7527 PragmaThresholdReached) { 7528 ORE->emit([&]() { 7529 return OptimizationRemarkAnalysisAliasing( 7530 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7531 OrigLoop->getHeader()) 7532 << "loop not vectorized: cannot prove it is safe to reorder " 7533 "memory operations"; 7534 }); 7535 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7536 Hints.emitRemarkWithHints(); 7537 return VectorizationFactor::Disabled(); 7538 } 7539 } 7540 return SelectedVF; 7541 } 7542 7543 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7544 assert(count_if(VPlans, 7545 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7546 1 && 7547 "Best VF has not a single VPlan."); 7548 7549 for (const VPlanPtr &Plan : VPlans) { 7550 if (Plan->hasVF(VF)) 7551 return *Plan.get(); 7552 } 7553 llvm_unreachable("No plan found!"); 7554 } 7555 7556 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7557 SmallVector<Metadata *, 4> MDs; 7558 // Reserve first location for self reference to the LoopID metadata node. 7559 MDs.push_back(nullptr); 7560 bool IsUnrollMetadata = false; 7561 MDNode *LoopID = L->getLoopID(); 7562 if (LoopID) { 7563 // First find existing loop unrolling disable metadata. 7564 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7565 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7566 if (MD) { 7567 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7568 IsUnrollMetadata = 7569 S && S->getString().startswith("llvm.loop.unroll.disable"); 7570 } 7571 MDs.push_back(LoopID->getOperand(i)); 7572 } 7573 } 7574 7575 if (!IsUnrollMetadata) { 7576 // Add runtime unroll disable metadata. 7577 LLVMContext &Context = L->getHeader()->getContext(); 7578 SmallVector<Metadata *, 1> DisableOperands; 7579 DisableOperands.push_back( 7580 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7581 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7582 MDs.push_back(DisableNode); 7583 MDNode *NewLoopID = MDNode::get(Context, MDs); 7584 // Set operand 0 to refer to the loop id itself. 7585 NewLoopID->replaceOperandWith(0, NewLoopID); 7586 L->setLoopID(NewLoopID); 7587 } 7588 } 7589 7590 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7591 VPlan &BestVPlan, 7592 InnerLoopVectorizer &ILV, 7593 DominatorTree *DT) { 7594 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7595 << '\n'); 7596 7597 // Perform the actual loop transformation. 7598 7599 // 1. Set up the skeleton for vectorization, including vector pre-header and 7600 // middle block. The vector loop is created during VPlan execution. 
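  // The skeleton includes the minimum-iteration-count check, any SCEV and
  // memory runtime checks, and the scalar preheader; the vector loop body
  // itself is only created when the VPlan is executed below.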
  VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
  Value *CanonicalIVStartValue;
  std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
      ILV.createVectorizedLoopSkeleton();
  ILV.collectPoisonGeneratingRecipes(State);

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
                             ILV.getOrCreateVectorTripCount(nullptr),
                             CanonicalIVStartValue, State);
  BestVPlan.execute(&State);

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});

  Loop *L = LI->getLoopFor(State.CFG.PrevBB);
  if (VectorizedLoopID.hasValue())
    L->setLoopID(VectorizedLoopID.getValue());
  else {
    // Keep all loop hints from the original loop on the vector loop (we'll
    // replace the vectorizer-specific hints below).
    if (MDNode *LID = OrigLoop->getLoopID())
      L->setLoopID(LID);

    LoopVectorizeHints Hints(L, true, *ORE);
    Hints.setAlreadyVectorized();
  }
  // Disable runtime unrolling when vectorizing the epilogue loop.
  if (CanonicalIVStartValue)
    AddRuntimeUnrollDisableMetaData(L);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  // predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
  SmallVector<BasicBlock*> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // An operand of the icmp is often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
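  // E.g. given '%i.next = add i64 %i, 1' feeding '%exit.cond = icmp eq i64
  // %i.next, %n', once the compare is dead the only remaining user of %i.next
  // is the induction phi itself, so %i.next becomes dead as well.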
7692 auto *Latch = OrigLoop->getLoopLatch(); 7693 for (auto &Induction : Legal->getInductionVars()) { 7694 PHINode *Ind = Induction.first; 7695 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7696 7697 // If the tail is to be folded by masking, the primary induction variable, 7698 // if exists, isn't dead: it will be used for masking. Don't kill it. 7699 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7700 continue; 7701 7702 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7703 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7704 })) 7705 DeadInstructions.insert(IndUpdate); 7706 } 7707 } 7708 7709 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7710 7711 //===--------------------------------------------------------------------===// 7712 // EpilogueVectorizerMainLoop 7713 //===--------------------------------------------------------------------===// 7714 7715 /// This function is partially responsible for generating the control flow 7716 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7717 std::pair<BasicBlock *, Value *> 7718 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7719 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7720 7721 // Workaround! Compute the trip count of the original loop and cache it 7722 // before we start modifying the CFG. This code has a systemic problem 7723 // wherein it tries to run analysis over partially constructed IR; this is 7724 // wrong, and not simply for SCEV. The trip count of the original loop 7725 // simply happens to be prone to hitting this in practice. In theory, we 7726 // can hit the same issue for any SCEV, or ValueTracking query done during 7727 // mutation. See PR49900. 7728 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 7729 createVectorLoopSkeleton(""); 7730 7731 // Generate the code to check the minimum iteration count of the vector 7732 // epilogue (see below). 7733 EPI.EpilogueIterationCountCheck = 7734 emitMinimumIterationCountCheck(LoopScalarPreHeader, true); 7735 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7736 7737 // Generate the code to check any assumptions that we've made for SCEV 7738 // expressions. 7739 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7740 7741 // Generate the code that checks at runtime if arrays overlap. We put the 7742 // checks into a separate block to make the more common case of few elements 7743 // faster. 7744 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7745 7746 // Generate the iteration count check for the main loop, *after* the check 7747 // for the epilogue loop, so that the path-length is shorter for the case 7748 // that goes directly through the vector epilogue. The longer-path length for 7749 // the main loop is compensated for, by the gain from vectorizing the larger 7750 // trip count. Note: the branch will get updated later on when we vectorize 7751 // the epilogue. 7752 EPI.MainLoopIterationCountCheck = 7753 emitMinimumIterationCountCheck(LoopScalarPreHeader, false); 7754 7755 // Generate the induction variable. 7756 Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader); 7757 EPI.VectorTripCount = CountRoundDown; 7758 7759 // Skip induction resume value creation here because they will be created in 7760 // the second pass. 
If we created them here, they wouldn't be used anyway, 7761 // because the vplan in the second pass still contains the inductions from the 7762 // original loop. 7763 7764 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7765 } 7766 7767 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7768 LLVM_DEBUG({ 7769 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7770 << "Main Loop VF:" << EPI.MainLoopVF 7771 << ", Main Loop UF:" << EPI.MainLoopUF 7772 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7773 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7774 }); 7775 } 7776 7777 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7778 DEBUG_WITH_TYPE(VerboseDebug, { 7779 dbgs() << "intermediate fn:\n" 7780 << *OrigLoop->getHeader()->getParent() << "\n"; 7781 }); 7782 } 7783 7784 BasicBlock * 7785 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass, 7786 bool ForEpilogue) { 7787 assert(Bypass && "Expected valid bypass basic block."); 7788 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 7789 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7790 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7791 // Reuse existing vector loop preheader for TC checks. 7792 // Note that new preheader block is generated for vector loop. 7793 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7794 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7795 7796 // Generate code to check if the loop's trip count is less than VF * UF of the 7797 // main vector loop. 7798 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7799 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7800 7801 Value *CheckMinIters = Builder.CreateICmp( 7802 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7803 "min.iters.check"); 7804 7805 if (!ForEpilogue) 7806 TCCheckBlock->setName("vector.main.loop.iter.check"); 7807 7808 // Create new preheader for vector loop. 7809 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7810 DT, LI, nullptr, "vector.ph"); 7811 7812 if (ForEpilogue) { 7813 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7814 DT->getNode(Bypass)->getIDom()) && 7815 "TC check is expected to dominate Bypass"); 7816 7817 // Update dominator for Bypass & LoopExit. 7818 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7819 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7820 // For loops with multiple exits, there's no edge from the middle block 7821 // to exit blocks (as the epilogue must run) and thus no need to update 7822 // the immediate dominator of the exit blocks. 7823 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7824 7825 LoopBypassBlocks.push_back(TCCheckBlock); 7826 7827 // Save the trip count so we don't have to regenerate it in the 7828 // vec.epilog.iter.check. This is safe to do because the trip count 7829 // generated here dominates the vector epilog iter check. 
7830 EPI.TripCount = Count; 7831 } 7832 7833 ReplaceInstWithInst( 7834 TCCheckBlock->getTerminator(), 7835 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7836 7837 return TCCheckBlock; 7838 } 7839 7840 //===--------------------------------------------------------------------===// 7841 // EpilogueVectorizerEpilogueLoop 7842 //===--------------------------------------------------------------------===// 7843 7844 /// This function is partially responsible for generating the control flow 7845 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7846 std::pair<BasicBlock *, Value *> 7847 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7848 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7849 createVectorLoopSkeleton("vec.epilog."); 7850 7851 // Now, compare the remaining count and if there aren't enough iterations to 7852 // execute the vectorized epilogue skip to the scalar part. 7853 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7854 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7855 LoopVectorPreHeader = 7856 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7857 LI, nullptr, "vec.epilog.ph"); 7858 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7859 VecEpilogueIterationCountCheck); 7860 7861 // Adjust the control flow taking the state info from the main loop 7862 // vectorization into account. 7863 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7864 "expected this to be saved from the previous pass."); 7865 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7866 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7867 7868 DT->changeImmediateDominator(LoopVectorPreHeader, 7869 EPI.MainLoopIterationCountCheck); 7870 7871 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7872 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7873 7874 if (EPI.SCEVSafetyCheck) 7875 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7876 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7877 if (EPI.MemSafetyCheck) 7878 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7879 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7880 7881 DT->changeImmediateDominator( 7882 VecEpilogueIterationCountCheck, 7883 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7884 7885 DT->changeImmediateDominator(LoopScalarPreHeader, 7886 EPI.EpilogueIterationCountCheck); 7887 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7888 // If there is an epilogue which must run, there's no edge from the 7889 // middle block to exit blocks and thus no need to update the immediate 7890 // dominator of the exit blocks. 7891 DT->changeImmediateDominator(LoopExitBlock, 7892 EPI.EpilogueIterationCountCheck); 7893 7894 // Keep track of bypass blocks, as they feed start values to the induction 7895 // phis in the scalar loop preheader. 7896 if (EPI.SCEVSafetyCheck) 7897 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7898 if (EPI.MemSafetyCheck) 7899 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7900 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7901 7902 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7903 // merge control-flow from the latch block and the middle block. Update the 7904 // incoming values here and move the Phi into the preheader. 
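  // Collect the phis up front: moving them while iterating over the block's
  // phi list would invalidate the iteration.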
  SmallVector<PHINode *, 4> PhisInBlock;
  for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
    PhisInBlock.push_back(&Phi);

  for (PHINode *Phi : PhisInBlock) {
    Phi->replaceIncomingBlockWith(
        VecEpilogueIterationCountCheck->getSinglePredecessor(),
        VecEpilogueIterationCountCheck);
    Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
    if (EPI.SCEVSafetyCheck)
      Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
    if (EPI.MemSafetyCheck)
      Phi->removeIncomingValue(EPI.MemSafetyCheck);
    Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
  }

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues({VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7960 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7961 7962 Value *CheckMinIters = 7963 Builder.CreateICmp(P, Count, 7964 createStepForVF(Builder, Count->getType(), 7965 EPI.EpilogueVF, EPI.EpilogueUF), 7966 "min.epilog.iters.check"); 7967 7968 ReplaceInstWithInst( 7969 Insert->getTerminator(), 7970 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7971 7972 LoopBypassBlocks.push_back(Insert); 7973 return Insert; 7974 } 7975 7976 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 7977 LLVM_DEBUG({ 7978 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 7979 << "Epilogue Loop VF:" << EPI.EpilogueVF 7980 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7981 }); 7982 } 7983 7984 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 7985 DEBUG_WITH_TYPE(VerboseDebug, { 7986 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 7987 }); 7988 } 7989 7990 bool LoopVectorizationPlanner::getDecisionAndClampRange( 7991 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 7992 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 7993 bool PredicateAtRangeStart = Predicate(Range.Start); 7994 7995 for (ElementCount TmpVF = Range.Start * 2; 7996 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 7997 if (Predicate(TmpVF) != PredicateAtRangeStart) { 7998 Range.End = TmpVF; 7999 break; 8000 } 8001 8002 return PredicateAtRangeStart; 8003 } 8004 8005 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8006 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8007 /// of VF's starting at a given VF and extending it as much as possible. Each 8008 /// vectorization decision can potentially shorten this sub-range during 8009 /// buildVPlan(). 8010 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8011 ElementCount MaxVF) { 8012 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8013 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8014 VFRange SubRange = {VF, MaxVFPlusOne}; 8015 VPlans.push_back(buildVPlan(SubRange)); 8016 VF = SubRange.End; 8017 } 8018 } 8019 8020 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8021 VPlanPtr &Plan) { 8022 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8023 8024 // Look for cached value. 8025 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8026 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8027 if (ECEntryIt != EdgeMaskCache.end()) 8028 return ECEntryIt->second; 8029 8030 VPValue *SrcMask = createBlockInMask(Src, Plan); 8031 8032 // The terminator has to be a branch inst! 8033 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8034 assert(BI && "Unexpected terminator found"); 8035 8036 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8037 return EdgeMaskCache[Edge] = SrcMask; 8038 8039 // If source is an exiting block, we know the exit edge is dynamically dead 8040 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8041 // adding uses of an otherwise potentially dead instruction. 
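  // (Within the vector loop the trip count guarantees that the latch is
  // reached, so an edge leaving the loop from Src is never taken there.)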
8042 if (OrigLoop->isLoopExiting(Src)) 8043 return EdgeMaskCache[Edge] = SrcMask; 8044 8045 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8046 assert(EdgeMask && "No Edge Mask found for condition"); 8047 8048 if (BI->getSuccessor(0) != Dst) 8049 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8050 8051 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8052 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8053 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8054 // The select version does not introduce new UB if SrcMask is false and 8055 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8056 VPValue *False = Plan->getOrAddVPValue( 8057 ConstantInt::getFalse(BI->getCondition()->getType())); 8058 EdgeMask = 8059 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8060 } 8061 8062 return EdgeMaskCache[Edge] = EdgeMask; 8063 } 8064 8065 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8066 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8067 8068 // Look for cached value. 8069 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8070 if (BCEntryIt != BlockMaskCache.end()) 8071 return BCEntryIt->second; 8072 8073 // All-one mask is modelled as no-mask following the convention for masked 8074 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8075 VPValue *BlockMask = nullptr; 8076 8077 if (OrigLoop->getHeader() == BB) { 8078 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8079 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8080 8081 // Introduce the early-exit compare IV <= BTC to form header block mask. 8082 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8083 // constructing the desired canonical IV in the header block as its first 8084 // non-phi instructions. 8085 assert(CM.foldTailByMasking() && "must fold the tail"); 8086 VPBasicBlock *HeaderVPBB = 8087 Plan->getVectorLoopRegion()->getEntryBasicBlock(); 8088 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8089 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8090 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8091 8092 VPBuilder::InsertPointGuard Guard(Builder); 8093 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8094 if (CM.TTI.emitGetActiveLaneMask()) { 8095 VPValue *TC = Plan->getOrCreateTripCount(); 8096 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8097 } else { 8098 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8099 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8100 } 8101 return BlockMaskCache[BB] = BlockMask; 8102 } 8103 8104 // This is the block mask. We OR all incoming edges. 8105 for (auto *Predecessor : predecessors(BB)) { 8106 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8107 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8108 return BlockMaskCache[BB] = EdgeMask; 8109 8110 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
  }

  return BlockMaskCache[BB] = BlockMask;
}

VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
                                                ArrayRef<VPValue *> Operands,
                                                VFRange &Range,
                                                VPlanPtr &Plan) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Must be called with either a load or store");

  auto willWiden = [&](ElementCount VF) -> bool {
    if (VF.isScalar())
      return false;
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    if (Decision == LoopVectorizationCostModel::CM_Interleave)
      return true;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  LoopVectorizationCostModel::InstWidening Decision =
      CM.getWideningDecision(I, Range.Start);
  bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
  bool Consecutive =
      Reverse || Decision == LoopVectorizationCostModel::CM_Widen;

  if (LoadInst *Load = dyn_cast<LoadInst>(I))
    return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
                                              Consecutive, Reverse);

  StoreInst *Store = cast<StoreInst>(I);
  return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
                                            Mask, Consecutive, Reverse);
}

/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
/// insert a recipe to expand the step for the induction recipe.
static VPWidenIntOrFpInductionRecipe *createWidenInductionRecipes(
    PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start,
    const InductionDescriptor &IndDesc, LoopVectorizationCostModel &CM,
    VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range) {
  // Returns true if an instruction \p I should be scalarized instead of
  // vectorized for the chosen vectorization factor.
  auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
    return CM.isScalarAfterVectorization(I, VF) ||
           CM.isProfitableToScalarize(I, VF);
  };

  bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) {
        // Returns true if we should generate a scalar version of \p IV.
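        // A scalar IV is needed either when the IV (or its truncate) is
        // itself scalarized, or when some in-loop user of it will be
        // scalarized and therefore wants per-lane scalar values.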
8183 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8184 return true; 8185 auto isScalarInst = [&](User *U) -> bool { 8186 auto *I = cast<Instruction>(U); 8187 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8188 }; 8189 return any_of(PhiOrTrunc->users(), isScalarInst); 8190 }, 8191 Range); 8192 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8193 [&](ElementCount VF) { 8194 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8195 }, 8196 Range); 8197 assert(IndDesc.getStartValue() == 8198 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8199 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8200 "step must be loop invariant"); 8201 8202 VPValue *Step = 8203 vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep(), SE); 8204 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8205 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, TruncI, 8206 NeedsScalarIV, !NeedsScalarIVOnly); 8207 } 8208 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8209 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, 8210 NeedsScalarIV, !NeedsScalarIVOnly); 8211 } 8212 8213 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8214 PHINode *Phi, ArrayRef<VPValue *> Operands, VPlan &Plan, VFRange &Range) { 8215 8216 // Check if this is an integer or fp induction. If so, build the recipe that 8217 // produces its scalar and vector values. 8218 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8219 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, CM, Plan, 8220 *PSE.getSE(), *OrigLoop, Range); 8221 8222 // Check if this is pointer induction. If so, build the recipe for it. 8223 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8224 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8225 *PSE.getSE()); 8226 return nullptr; 8227 } 8228 8229 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8230 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, VPlan &Plan) { 8231 // Optimize the special case where the source is a constant integer 8232 // induction variable. Notice that we can only optimize the 'trunc' case 8233 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8234 // (c) other casts depend on pointer size. 8235 8236 // Determine whether \p K is a truncation based on an induction variable that 8237 // can be optimized. 8238 auto isOptimizableIVTruncate = 8239 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8240 return [=](ElementCount VF) -> bool { 8241 return CM.isOptimizableIVTruncate(K, VF); 8242 }; 8243 }; 8244 8245 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8246 isOptimizableIVTruncate(I), Range)) { 8247 8248 auto *Phi = cast<PHINode>(I->getOperand(0)); 8249 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8250 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8251 return createWidenInductionRecipes(Phi, I, Start, II, CM, Plan, 8252 *PSE.getSE(), *OrigLoop, Range); 8253 } 8254 return nullptr; 8255 } 8256 8257 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8258 ArrayRef<VPValue *> Operands, 8259 VPlanPtr &Plan) { 8260 // If all incoming values are equal, the incoming VPValue can be used directly 8261 // instead of creating a new VPBlendRecipe. 
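  // (E.g. a phi of the form 'phi [ %x, %bb1 ], [ %x, %bb2 ]' simply forwards
  // %x.)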
8262 VPValue *FirstIncoming = Operands[0]; 8263 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8264 return FirstIncoming == Inc; 8265 })) { 8266 return Operands[0]; 8267 } 8268 8269 unsigned NumIncoming = Phi->getNumIncomingValues(); 8270 // For in-loop reductions, we do not need to create an additional select. 8271 VPValue *InLoopVal = nullptr; 8272 for (unsigned In = 0; In < NumIncoming; In++) { 8273 PHINode *PhiOp = 8274 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8275 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8276 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8277 InLoopVal = Operands[In]; 8278 } 8279 } 8280 8281 assert((!InLoopVal || NumIncoming == 2) && 8282 "Found an in-loop reduction for PHI with unexpected number of " 8283 "incoming values"); 8284 if (InLoopVal) 8285 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8286 8287 // We know that all PHIs in non-header blocks are converted into selects, so 8288 // we don't have to worry about the insertion order and we can just use the 8289 // builder. At this point we generate the predication tree. There may be 8290 // duplications since this is a simple recursive scan, but future 8291 // optimizations will clean it up. 8292 SmallVector<VPValue *, 2> OperandsWithMask; 8293 8294 for (unsigned In = 0; In < NumIncoming; In++) { 8295 VPValue *EdgeMask = 8296 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8297 assert((EdgeMask || NumIncoming == 1) && 8298 "Multiple predecessors with one having a full mask"); 8299 OperandsWithMask.push_back(Operands[In]); 8300 if (EdgeMask) 8301 OperandsWithMask.push_back(EdgeMask); 8302 } 8303 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8304 } 8305 8306 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8307 ArrayRef<VPValue *> Operands, 8308 VFRange &Range) const { 8309 8310 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8311 [this, CI](ElementCount VF) { 8312 return CM.isScalarWithPredication(CI, VF); 8313 }, 8314 Range); 8315 8316 if (IsPredicated) 8317 return nullptr; 8318 8319 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8320 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8321 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8322 ID == Intrinsic::pseudoprobe || 8323 ID == Intrinsic::experimental_noalias_scope_decl)) 8324 return nullptr; 8325 8326 auto willWiden = [&](ElementCount VF) -> bool { 8327 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8328 // The following case may be scalarized depending on the VF. 8329 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8330 // version of the instruction. 8331 // Is it beneficial to perform intrinsic call compared to lib call? 8332 bool NeedToScalarize = false; 8333 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8334 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8335 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8336 return UseVectorIntrinsic || !NeedToScalarize; 8337 }; 8338 8339 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8340 return nullptr; 8341 8342 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8343 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8344 } 8345 8346 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8347 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8348 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8349 // Instruction should be widened, unless it is scalar after vectorization, 8350 // scalarization is profitable or it is predicated. 8351 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8352 return CM.isScalarAfterVectorization(I, VF) || 8353 CM.isProfitableToScalarize(I, VF) || 8354 CM.isScalarWithPredication(I, VF); 8355 }; 8356 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8357 Range); 8358 } 8359 8360 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8361 ArrayRef<VPValue *> Operands) const { 8362 auto IsVectorizableOpcode = [](unsigned Opcode) { 8363 switch (Opcode) { 8364 case Instruction::Add: 8365 case Instruction::And: 8366 case Instruction::AShr: 8367 case Instruction::BitCast: 8368 case Instruction::FAdd: 8369 case Instruction::FCmp: 8370 case Instruction::FDiv: 8371 case Instruction::FMul: 8372 case Instruction::FNeg: 8373 case Instruction::FPExt: 8374 case Instruction::FPToSI: 8375 case Instruction::FPToUI: 8376 case Instruction::FPTrunc: 8377 case Instruction::FRem: 8378 case Instruction::FSub: 8379 case Instruction::ICmp: 8380 case Instruction::IntToPtr: 8381 case Instruction::LShr: 8382 case Instruction::Mul: 8383 case Instruction::Or: 8384 case Instruction::PtrToInt: 8385 case Instruction::SDiv: 8386 case Instruction::Select: 8387 case Instruction::SExt: 8388 case Instruction::Shl: 8389 case Instruction::SIToFP: 8390 case Instruction::SRem: 8391 case Instruction::Sub: 8392 case Instruction::Trunc: 8393 case Instruction::UDiv: 8394 case Instruction::UIToFP: 8395 case Instruction::URem: 8396 case Instruction::Xor: 8397 case Instruction::ZExt: 8398 return true; 8399 } 8400 return false; 8401 }; 8402 8403 if (!IsVectorizableOpcode(I->getOpcode())) 8404 return nullptr; 8405 8406 // Success: widen this instruction. 
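  // The recipe only records the ingredient and its operands as VPValues; the
  // actual wide instructions are emitted later in VPWidenRecipe::execute.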
8407 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8408 } 8409 8410 void VPRecipeBuilder::fixHeaderPhis() { 8411 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8412 for (VPHeaderPHIRecipe *R : PhisToFix) { 8413 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8414 VPRecipeBase *IncR = 8415 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8416 R->addOperand(IncR->getVPSingleValue()); 8417 } 8418 } 8419 8420 VPBasicBlock *VPRecipeBuilder::handleReplication( 8421 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8422 VPlanPtr &Plan) { 8423 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8424 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8425 Range); 8426 8427 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8428 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8429 Range); 8430 8431 // Even if the instruction is not marked as uniform, there are certain 8432 // intrinsic calls that can be effectively treated as such, so we check for 8433 // them here. Conservatively, we only do this for scalable vectors, since 8434 // for fixed-width VFs we can always fall back on full scalarization. 8435 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8436 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8437 case Intrinsic::assume: 8438 case Intrinsic::lifetime_start: 8439 case Intrinsic::lifetime_end: 8440 // For scalable vectors if one of the operands is variant then we still 8441 // want to mark as uniform, which will generate one instruction for just 8442 // the first lane of the vector. We can't scalarize the call in the same 8443 // way as for fixed-width vectors because we don't know how many lanes 8444 // there are. 8445 // 8446 // The reasons for doing it this way for scalable vectors are: 8447 // 1. For the assume intrinsic generating the instruction for the first 8448 // lane is still better than not generating any at all. For 8449 // example, the input may be a splat across all lanes. 8450 // 2. For the lifetime start/end intrinsics the pointer operand only 8451 // does anything useful when the input comes from a stack object, 8452 // which suggests it should always be uniform. For non-stack objects 8453 // the effect is to poison the object, which still allows us to 8454 // remove the call. 8455 IsUniform = true; 8456 break; 8457 default: 8458 break; 8459 } 8460 } 8461 8462 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8463 IsUniform, IsPredicated); 8464 setRecipe(I, Recipe); 8465 Plan->addVPValue(I, Recipe); 8466 8467 // Find if I uses a predicated instruction. If so, it will use its scalar 8468 // value. Avoid hoisting the insert-element which packs the scalar value into 8469 // a vector value, as that happens iff all users use the vector value. 8470 for (VPValue *Op : Recipe->operands()) { 8471 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8472 if (!PredR) 8473 continue; 8474 auto *RepR = 8475 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8476 assert(RepR->isPredicated() && 8477 "expected Replicate recipe to be predicated"); 8478 RepR->setAlsoPack(false); 8479 } 8480 8481 // Finalize the recipe for Instr, first if it is not predicated.
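  // Unpredicated recipes are simply appended to the current block. Predicated
  // ones are wrapped below in a single-entry single-exit replicate region, and
  // the new successor block is returned so following recipes are placed after
  // the region.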
8482 if (!IsPredicated) { 8483 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8484 VPBB->appendRecipe(Recipe); 8485 return VPBB; 8486 } 8487 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8488 8489 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8490 assert(SingleSucc && "VPBB must have a single successor when handling " 8491 "predicated replication."); 8492 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8493 // Record predicated instructions for above packing optimizations. 8494 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8495 VPBlockUtils::insertBlockAfter(Region, VPBB); 8496 auto *RegSucc = new VPBasicBlock(); 8497 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8498 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8499 return RegSucc; 8500 } 8501 8502 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8503 VPRecipeBase *PredRecipe, 8504 VPlanPtr &Plan) { 8505 // Instructions marked for predication are replicated and placed under an 8506 // if-then construct to prevent side-effects. 8507 8508 // Generate recipes to compute the block mask for this region. 8509 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8510 8511 // Build the triangular if-then region. 8512 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8513 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8514 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8515 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8516 auto *PHIRecipe = Instr->getType()->isVoidTy() 8517 ? nullptr 8518 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8519 if (PHIRecipe) { 8520 Plan->removeVPValueFor(Instr); 8521 Plan->addVPValue(Instr, PHIRecipe); 8522 } 8523 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8524 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8525 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8526 8527 // Note: first set Entry as region entry and then connect successors starting 8528 // from it in order, to propagate the "parent" of each VPBasicBlock. 8529 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8530 VPBlockUtils::connectBlocks(Pred, Exit); 8531 8532 return Region; 8533 } 8534 8535 VPRecipeOrVPValueTy 8536 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8537 ArrayRef<VPValue *> Operands, 8538 VFRange &Range, VPlanPtr &Plan) { 8539 // First, check for specific widening recipes that deal with calls, memory 8540 // operations, inductions and Phi nodes. 
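  // Each helper below returns nullptr if it cannot produce a recipe for the
  // given range; in that case the caller falls back to scalar replication via
  // handleReplication.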
8541 if (auto *CI = dyn_cast<CallInst>(Instr)) 8542 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8543 8544 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8545 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8546 8547 VPRecipeBase *Recipe; 8548 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8549 if (Phi->getParent() != OrigLoop->getHeader()) 8550 return tryToBlend(Phi, Operands, Plan); 8551 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range))) 8552 return toVPRecipeResult(Recipe); 8553 8554 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8555 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8556 VPValue *StartV = Operands[0]; 8557 if (Legal->isReductionVariable(Phi)) { 8558 const RecurrenceDescriptor &RdxDesc = 8559 Legal->getReductionVars().find(Phi)->second; 8560 assert(RdxDesc.getRecurrenceStartValue() == 8561 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8562 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8563 CM.isInLoopReduction(Phi), 8564 CM.useOrderedReductions(RdxDesc)); 8565 } else { 8566 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8567 } 8568 8569 // Record the incoming value from the backedge, so we can add the incoming 8570 // value from the backedge after all recipes have been created. 8571 recordRecipeOf(cast<Instruction>( 8572 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8573 PhisToFix.push_back(PhiRecipe); 8574 } else { 8575 // TODO: record backedge value for remaining pointer induction phis. 8576 assert(Phi->getType()->isPointerTy() && 8577 "only pointer phis should be handled here"); 8578 assert(Legal->getInductionVars().count(Phi) && 8579 "Not an induction variable"); 8580 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8581 VPValue *Start = Plan->getOrAddVPValue(II.getStartValue()); 8582 PhiRecipe = new VPWidenPHIRecipe(Phi, Start); 8583 } 8584 8585 return toVPRecipeResult(PhiRecipe); 8586 } 8587 8588 if (isa<TruncInst>(Instr) && 8589 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8590 Range, *Plan))) 8591 return toVPRecipeResult(Recipe); 8592 8593 if (!shouldWiden(Instr, Range)) 8594 return nullptr; 8595 8596 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8597 return toVPRecipeResult(new VPWidenGEPRecipe( 8598 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8599 8600 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8601 bool InvariantCond = 8602 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8603 return toVPRecipeResult(new VPWidenSelectRecipe( 8604 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8605 } 8606 8607 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8608 } 8609 8610 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8611 ElementCount MaxVF) { 8612 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8613 8614 // Collect instructions from the original loop that will become trivially dead 8615 // in the vectorized loop. We don't need to vectorize these instructions. For 8616 // example, original induction update instructions can become dead because we 8617 // separately emit induction "steps" when generating code for the new loop. 8618 // Similarly, we create a new latch condition when setting up the structure 8619 // of the new loop, so the old one can become dead. 
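  // E.g., for a loop like 'for (i = 0; i < n; ++i) a[i] = b[i];' the scalar
  // increment of 'i' and the compare feeding the latch branch typically need
  // no recipes of their own.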
8620 SmallPtrSet<Instruction *, 4> DeadInstructions; 8621 collectTriviallyDeadInstructions(DeadInstructions); 8622 8623 // Add assume instructions we need to drop to DeadInstructions, to prevent 8624 // them from being added to the VPlan. 8625 // TODO: We only need to drop assumes in blocks that get flattened. If the 8626 // control flow is preserved, we should keep them. 8627 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 8628 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 8629 8630 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8631 // Dead instructions do not need sinking. Remove them from SinkAfter. 8632 for (Instruction *I : DeadInstructions) 8633 SinkAfter.erase(I); 8634 8635 // Cannot sink instructions after dead instructions (there won't be any 8636 // recipes for them). Instead, find the first non-dead previous instruction. 8637 for (auto &P : Legal->getSinkAfter()) { 8638 Instruction *SinkTarget = P.second; 8639 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8640 (void)FirstInst; 8641 while (DeadInstructions.contains(SinkTarget)) { 8642 assert( 8643 SinkTarget != FirstInst && 8644 "Must find a live instruction (at least the one feeding the " 8645 "first-order recurrence PHI) before reaching beginning of the block"); 8646 SinkTarget = SinkTarget->getPrevNode(); 8647 assert(SinkTarget != P.first && 8648 "sink source equals target, no sinking required"); 8649 } 8650 P.second = SinkTarget; 8651 } 8652 8653 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8654 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8655 VFRange SubRange = {VF, MaxVFPlusOne}; 8656 VPlans.push_back( 8657 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8658 VF = SubRange.End; 8659 } 8660 } 8661 8662 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a 8663 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a 8664 // BranchOnCount VPInstruction to the latch. 8665 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8666 bool HasNUW, bool IsVPlanNative) { 8667 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8668 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8669 8670 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8671 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8672 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8673 Header->insert(CanonicalIVPHI, Header->begin()); 8674 8675 auto *CanonicalIVIncrement = 8676 new VPInstruction(HasNUW ?
VPInstruction::CanonicalIVIncrementNUW 8677 : VPInstruction::CanonicalIVIncrement, 8678 {CanonicalIVPHI}, DL); 8679 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8680 8681 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8682 if (IsVPlanNative) 8683 EB->setCondBit(nullptr); 8684 EB->appendRecipe(CanonicalIVIncrement); 8685 8686 auto *BranchOnCount = 8687 new VPInstruction(VPInstruction::BranchOnCount, 8688 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8689 EB->appendRecipe(BranchOnCount); 8690 } 8691 8692 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8693 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8694 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8695 8696 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8697 8698 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8699 8700 // --------------------------------------------------------------------------- 8701 // Pre-construction: record ingredients whose recipes we'll need to further 8702 // process after constructing the initial VPlan. 8703 // --------------------------------------------------------------------------- 8704 8705 // Mark instructions we'll need to sink later and their targets as 8706 // ingredients whose recipe we'll need to record. 8707 for (auto &Entry : SinkAfter) { 8708 RecipeBuilder.recordRecipeOf(Entry.first); 8709 RecipeBuilder.recordRecipeOf(Entry.second); 8710 } 8711 for (auto &Reduction : CM.getInLoopReductionChains()) { 8712 PHINode *Phi = Reduction.first; 8713 RecurKind Kind = 8714 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8715 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8716 8717 RecipeBuilder.recordRecipeOf(Phi); 8718 for (auto &R : ReductionOperations) { 8719 RecipeBuilder.recordRecipeOf(R); 8720 // For min/max reductions, where we have a pair of icmp/select, we also 8721 // need to record the ICmp recipe, so it can be removed later. 8722 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8723 "Only min/max recurrences allowed for inloop reductions"); 8724 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8725 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8726 } 8727 } 8728 8729 // For each interleave group which is relevant for this (possibly trimmed) 8730 // Range, add it to the set of groups to be later applied to the VPlan and add 8731 // placeholders for its members' Recipes which we'll be replacing with a 8732 // single VPInterleaveRecipe. 8733 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8734 auto applyIG = [IG, this](ElementCount VF) -> bool { 8735 return (VF.isVector() && // Query is illegal for VF == 1 8736 CM.getWideningDecision(IG->getInsertPos(), VF) == 8737 LoopVectorizationCostModel::CM_Interleave); 8738 }; 8739 if (!getDecisionAndClampRange(applyIG, Range)) 8740 continue; 8741 InterleaveGroups.insert(IG); 8742 for (unsigned i = 0; i < IG->getFactor(); i++) 8743 if (Instruction *Member = IG->getMember(i)) 8744 RecipeBuilder.recordRecipeOf(Member); 8745 }; 8746 8747 // --------------------------------------------------------------------------- 8748 // Build initial VPlan: Scan the body of the loop in a topological order to 8749 // visit each basic block after having visited its predecessor basic blocks. 
8750 // --------------------------------------------------------------------------- 8751 8752 // Create initial VPlan skeleton, starting with a block for the pre-header, 8753 // followed by a region for the vector loop. The skeleton vector loop region 8754 // contains a header and latch block. 8755 VPBasicBlock *Preheader = new VPBasicBlock("vector.ph"); 8756 auto Plan = std::make_unique<VPlan>(Preheader); 8757 8758 VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body"); 8759 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch"); 8760 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB); 8761 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop"); 8762 VPBlockUtils::insertBlockAfter(TopRegion, Preheader); 8763 8764 Instruction *DLInst = 8765 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 8766 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), 8767 DLInst ? DLInst->getDebugLoc() : DebugLoc(), 8768 !CM.foldTailByMasking(), false); 8769 8770 // Scan the body of the loop in a topological order to visit each basic block 8771 // after having visited its predecessor basic blocks. 8772 LoopBlocksDFS DFS(OrigLoop); 8773 DFS.perform(LI); 8774 8775 VPBasicBlock *VPBB = HeaderVPBB; 8776 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 8777 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8778 // Relevant instructions from basic block BB will be grouped into VPRecipe 8779 // ingredients and fill a new VPBasicBlock. 8780 unsigned VPBBsForBB = 0; 8781 if (VPBB != HeaderVPBB) 8782 VPBB->setName(BB->getName()); 8783 Builder.setInsertPoint(VPBB); 8784 8785 // Introduce each ingredient into VPlan. 8786 // TODO: Model and preserve debug intrinsics in VPlan. 8787 for (Instruction &I : BB->instructionsWithoutDebug()) { 8788 Instruction *Instr = &I; 8789 8790 // First filter out irrelevant instructions, to ensure no recipes are 8791 // built for them. 8792 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8793 continue; 8794 8795 SmallVector<VPValue *, 4> Operands; 8796 auto *Phi = dyn_cast<PHINode>(Instr); 8797 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8798 Operands.push_back(Plan->getOrAddVPValue( 8799 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8800 } else { 8801 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8802 Operands = {OpRange.begin(), OpRange.end()}; 8803 } 8804 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8805 Instr, Operands, Range, Plan)) { 8806 // If Instr can be simplified to an existing VPValue, use it. 8807 if (RecipeOrValue.is<VPValue *>()) { 8808 auto *VPV = RecipeOrValue.get<VPValue *>(); 8809 Plan->addVPValue(Instr, VPV); 8810 // If the re-used value is a recipe, register the recipe for the 8811 // instruction, in case the recipe for Instr needs to be recorded. 8812 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8813 RecipeBuilder.setRecipe(Instr, R); 8814 continue; 8815 } 8816 // Otherwise, add the new recipe. 8817 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8818 for (auto *Def : Recipe->definedValues()) { 8819 auto *UV = Def->getUnderlyingValue(); 8820 Plan->addVPValue(UV, Def); 8821 } 8822 8823 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8824 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8825 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8826 // of the header block. That can happen for truncates of induction 8827 // variables.
Those recipes are moved to the phi section of the header 8828 // block after applying SinkAfter, which relies on the original 8829 // position of the trunc. 8830 assert(isa<TruncInst>(Instr)); 8831 InductionsToMove.push_back( 8832 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8833 } 8834 RecipeBuilder.setRecipe(Instr, Recipe); 8835 VPBB->appendRecipe(Recipe); 8836 continue; 8837 } 8838 8839 // Otherwise, if all widening options failed, Instruction is to be 8840 // replicated. This may create a successor for VPBB. 8841 VPBasicBlock *NextVPBB = 8842 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8843 if (NextVPBB != VPBB) { 8844 VPBB = NextVPBB; 8845 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8846 : ""); 8847 } 8848 } 8849 8850 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8851 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8852 } 8853 8854 HeaderVPBB->setName("vector.body"); 8855 8856 // Fold the last, empty block into its predecessor. 8857 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8858 assert(VPBB && "expected to fold last (empty) block"); 8859 // After here, VPBB should not be used. 8860 VPBB = nullptr; 8861 8862 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8863 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8864 "entry block must be set to a VPRegionBlock having a non-empty entry " 8865 "VPBasicBlock"); 8866 RecipeBuilder.fixHeaderPhis(); 8867 8868 // --------------------------------------------------------------------------- 8869 // Transform initial VPlan: Apply previously taken decisions, in order, to 8870 // bring the VPlan to its final state. 8871 // --------------------------------------------------------------------------- 8872 8873 // Apply Sink-After legal constraints. 8874 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8875 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8876 if (Region && Region->isReplicator()) { 8877 assert(Region->getNumSuccessors() == 1 && 8878 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8879 assert(R->getParent()->size() == 1 && 8880 "A recipe in an original replicator region must be the only " 8881 "recipe in its block"); 8882 return Region; 8883 } 8884 return nullptr; 8885 }; 8886 for (auto &Entry : SinkAfter) { 8887 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8888 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8889 8890 auto *TargetRegion = GetReplicateRegion(Target); 8891 auto *SinkRegion = GetReplicateRegion(Sink); 8892 if (!SinkRegion) { 8893 // If the sink source is not a replicate region, sink the recipe directly. 8894 if (TargetRegion) { 8895 // The target is in a replication region, make sure to move Sink to 8896 // the block after it, not into the replication region itself. 8897 VPBasicBlock *NextBlock = 8898 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8899 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8900 } else 8901 Sink->moveAfter(Target); 8902 continue; 8903 } 8904 8905 // The sink source is in a replicate region. Unhook the region from the CFG. 
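  // First unhook the region by connecting its single predecessor directly to
  // its single successor; the region is re-inserted below, either after the
  // target's replicate region or after a split of the target's block.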
8906 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8907 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8908 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8909 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8910 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8911 8912 if (TargetRegion) { 8913 // The target recipe is also in a replicate region, move the sink region 8914 // after the target region. 8915 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8916 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8917 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8918 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8919 } else { 8920 // The sink source is in a replicate region, we need to move the whole 8921 // replicate region, which should only contain a single recipe in the 8922 // main block. 8923 auto *SplitBlock = 8924 Target->getParent()->splitAt(std::next(Target->getIterator())); 8925 8926 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8927 8928 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8929 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8930 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8931 } 8932 } 8933 8934 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8935 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8936 8937 // Now that sink-after is done, move induction recipes for optimized truncates 8938 // to the phi section of the header block. 8939 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8940 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8941 8942 // Adjust the recipes for any inloop reductions. 8943 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 8944 RecipeBuilder, Range.Start); 8945 8946 // Introduce a recipe to combine the incoming and previous values of a 8947 // first-order recurrence. 8948 for (VPRecipeBase &R : 8949 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8950 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8951 if (!RecurPhi) 8952 continue; 8953 8954 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8955 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8956 auto *Region = GetReplicateRegion(PrevRecipe); 8957 if (Region) 8958 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 8959 if (Region || PrevRecipe->isPhi()) 8960 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8961 else 8962 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8963 8964 auto *RecurSplice = cast<VPInstruction>( 8965 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8966 {RecurPhi, RecurPhi->getBackedgeValue()})); 8967 8968 RecurPhi->replaceAllUsesWith(RecurSplice); 8969 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8970 // all users. 8971 RecurSplice->setOperand(0, RecurPhi); 8972 } 8973 8974 // Interleave memory: for each Interleave Group we marked earlier as relevant 8975 // for this VPlan, replace the Recipes widening its memory instructions with a 8976 // single VPInterleaveRecipe at its insertion point. 
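  // For a group such as {A[2*i], A[2*i+1]}, the recipes of the two members are
  // replaced by one VPInterleaveRecipe anchored at the group's insert
  // position; codegen later emits a single wide access plus shuffles (see
  // InnerLoopVectorizer::vectorizeInterleaveGroup).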
8977 for (auto IG : InterleaveGroups) { 8978 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8979 RecipeBuilder.getRecipe(IG->getInsertPos())); 8980 SmallVector<VPValue *, 4> StoredValues; 8981 for (unsigned i = 0; i < IG->getFactor(); ++i) 8982 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 8983 auto *StoreR = 8984 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 8985 StoredValues.push_back(StoreR->getStoredValue()); 8986 } 8987 8988 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8989 Recipe->getMask()); 8990 VPIG->insertBefore(Recipe); 8991 unsigned J = 0; 8992 for (unsigned i = 0; i < IG->getFactor(); ++i) 8993 if (Instruction *Member = IG->getMember(i)) { 8994 if (!Member->getType()->isVoidTy()) { 8995 VPValue *OriginalV = Plan->getVPValue(Member); 8996 Plan->removeVPValueFor(Member); 8997 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8998 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8999 J++; 9000 } 9001 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9002 } 9003 } 9004 9005 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9006 // in ways that accessing values using original IR values is incorrect. 9007 Plan->disableValue2VPValue(); 9008 9009 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9010 VPlanTransforms::sinkScalarOperands(*Plan); 9011 VPlanTransforms::mergeReplicateRegions(*Plan); 9012 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9013 VPlanTransforms::removeRedundantExpandSCEVRecipes(*Plan); 9014 9015 std::string PlanName; 9016 raw_string_ostream RSO(PlanName); 9017 ElementCount VF = Range.Start; 9018 Plan->addVF(VF); 9019 RSO << "Initial VPlan for VF={" << VF; 9020 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9021 Plan->addVF(VF); 9022 RSO << "," << VF; 9023 } 9024 RSO << "},UF>=1"; 9025 RSO.flush(); 9026 Plan->setName(PlanName); 9027 9028 // Fold Exit block into its predecessor if possible. 9029 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9030 // VPBasicBlock as exit. 9031 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit()); 9032 9033 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9034 return Plan; 9035 } 9036 9037 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9038 // Outer loop handling: They may require CFG and instruction level 9039 // transformations before even evaluating whether vectorization is profitable. 9040 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9041 // the vectorization pipeline. 9042 assert(!OrigLoop->isInnermost()); 9043 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9044 9045 // Create new empty VPlan 9046 auto Plan = std::make_unique<VPlan>(); 9047 9048 // Build hierarchical CFG 9049 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9050 HCFGBuilder.buildHierarchicalCFG(); 9051 9052 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9053 VF *= 2) 9054 Plan->addVF(VF); 9055 9056 if (EnableVPlanPredication) { 9057 VPlanPredicator VPP(*Plan); 9058 VPP.predicate(); 9059 9060 // Avoid running transformation to recipes until masked code generation in 9061 // VPlan-native path is in place. 
9062 return Plan; 9063 } 9064 9065 SmallPtrSet<Instruction *, 1> DeadInstructions; 9066 VPlanTransforms::VPInstructionsToVPRecipes( 9067 OrigLoop, Plan, 9068 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, 9069 DeadInstructions, *PSE.getSE()); 9070 9071 // Update plan to be compatible with the inner loop vectorizer for 9072 // code-generation. 9073 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion(); 9074 VPBasicBlock *Preheader = LoopRegion->getEntryBasicBlock(); 9075 VPBasicBlock *Exit = LoopRegion->getExitBasicBlock(); 9076 VPBlockBase *Latch = Exit->getSinglePredecessor(); 9077 VPBlockBase *Header = Preheader->getSingleSuccessor(); 9078 9079 // 1. Move preheader block out of main vector loop. 9080 Preheader->setParent(LoopRegion->getParent()); 9081 VPBlockUtils::disconnectBlocks(Preheader, Header); 9082 VPBlockUtils::connectBlocks(Preheader, LoopRegion); 9083 Plan->setEntry(Preheader); 9084 9085 // 2. Disconnect backedge and exit block. 9086 VPBlockUtils::disconnectBlocks(Latch, Header); 9087 VPBlockUtils::disconnectBlocks(Latch, Exit); 9088 9089 // 3. Update entry and exit of main vector loop region. 9090 LoopRegion->setEntry(Header); 9091 LoopRegion->setExit(Latch); 9092 9093 // 4. Remove exit block. 9094 delete Exit; 9095 9096 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(), 9097 true, true); 9098 return Plan; 9099 } 9100 9101 // Adjust the recipes for reductions. For in-loop reductions the chain of 9102 // instructions leading from the loop exit instr to the phi needs to be converted 9103 // to reductions, with one operand being vector and the other being the scalar 9104 // reduction chain. For other reductions, a select is introduced between the phi 9105 // and live-out recipes when folding the tail. 9106 void LoopVectorizationPlanner::adjustRecipesForReductions( 9107 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, 9108 ElementCount MinVF) { 9109 for (auto &Reduction : CM.getInLoopReductionChains()) { 9110 PHINode *Phi = Reduction.first; 9111 const RecurrenceDescriptor &RdxDesc = 9112 Legal->getReductionVars().find(Phi)->second; 9113 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9114 9115 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) 9116 continue; 9117 9118 // ReductionOperations are ordered top-down from the phi's use to the 9119 // LoopExitValue. We keep track of the previous item (the Chain) to tell 9120 // which of the two operands will remain scalar and which will be reduced. 9121 // For minmax the chain will be the select instructions. 9122 Instruction *Chain = Phi; 9123 for (Instruction *R : ReductionOperations) { 9124 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9125 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9126 9127 VPValue *ChainOp = Plan->getVPValue(Chain); 9128 unsigned FirstOpId; 9129 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9130 "Only min/max recurrences allowed for inloop reductions"); 9131 // Recognize a call to the llvm.fmuladd intrinsic.
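      // llvm.fmuladd is split below: a separate FMul recipe supplies the
      // vector operand, while its addition becomes part of the reduction.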
9132 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9133 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9134 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9135 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9136 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9137 "Expected to replace a VPWidenSelectSC"); 9138 FirstOpId = 1; 9139 } else { 9140 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9141 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9142 "Expected to replace a VPWidenSC"); 9143 FirstOpId = 0; 9144 } 9145 unsigned VecOpId = 9146 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9147 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9148 9149 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent()) 9150 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9151 : nullptr; 9152 9153 if (IsFMulAdd) { 9154 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9155 // need to create an fmul recipe to use as the vector operand for the 9156 // fadd reduction. 9157 VPInstruction *FMulRecipe = new VPInstruction( 9158 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9159 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9160 WidenRecipe->getParent()->insert(FMulRecipe, 9161 WidenRecipe->getIterator()); 9162 VecOp = FMulRecipe; 9163 } 9164 VPReductionRecipe *RedRecipe = 9165 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9166 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9167 Plan->removeVPValueFor(R); 9168 Plan->addVPValue(R, RedRecipe); 9169 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9170 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9171 WidenRecipe->eraseFromParent(); 9172 9173 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9174 VPRecipeBase *CompareRecipe = 9175 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9176 assert(isa<VPWidenRecipe>(CompareRecipe) && 9177 "Expected to replace a VPWidenSC"); 9178 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9179 "Expected no remaining users"); 9180 CompareRecipe->eraseFromParent(); 9181 } 9182 Chain = R; 9183 } 9184 } 9185 9186 // If tail is folded by masking, introduce selects between the phi 9187 // and the live-out instruction of each reduction, at the beginning of the 9188 // dedicated latch block. 
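  // The select generated below has the form
  //   select(header-mask, reduction-update, reduction-phi)
  // so lanes masked off in the tail keep the value of the previous iteration.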
9189 if (CM.foldTailByMasking()) { 9190 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9191 for (VPRecipeBase &R : 9192 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9193 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9194 if (!PhiR || PhiR->isInLoop()) 9195 continue; 9196 VPValue *Cond = 9197 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9198 VPValue *Red = PhiR->getBackedgeValue(); 9199 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9200 "reduction recipe must be defined before latch"); 9201 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9202 } 9203 } 9204 } 9205 9206 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9207 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9208 VPSlotTracker &SlotTracker) const { 9209 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9210 IG->getInsertPos()->printAsOperand(O, false); 9211 O << ", "; 9212 getAddr()->printAsOperand(O, SlotTracker); 9213 VPValue *Mask = getMask(); 9214 if (Mask) { 9215 O << ", "; 9216 Mask->printAsOperand(O, SlotTracker); 9217 } 9218 9219 unsigned OpIdx = 0; 9220 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9221 if (!IG->getMember(i)) 9222 continue; 9223 if (getNumStoreOperands() > 0) { 9224 O << "\n" << Indent << " store "; 9225 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9226 O << " to index " << i; 9227 } else { 9228 O << "\n" << Indent << " "; 9229 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9230 O << " = load from index " << i; 9231 } 9232 ++OpIdx; 9233 } 9234 } 9235 #endif 9236 9237 void VPWidenCallRecipe::execute(VPTransformState &State) { 9238 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9239 *this, State); 9240 } 9241 9242 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9243 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9244 State.ILV->setDebugLocFromInst(&I); 9245 9246 // The condition can be loop invariant but still defined inside the 9247 // loop. This means that we can't just use the original 'cond' value. 9248 // We have to take the 'vectorized' value and pick the first lane. 9249 // Instcombine will make this a no-op. 9250 auto *InvarCond = 9251 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9252 9253 for (unsigned Part = 0; Part < State.UF; ++Part) { 9254 Value *Cond = InvarCond ? 
InvarCond : State.get(getOperand(0), Part); 9255 Value *Op0 = State.get(getOperand(1), Part); 9256 Value *Op1 = State.get(getOperand(2), Part); 9257 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9258 State.set(this, Sel, Part); 9259 State.ILV->addMetadata(Sel, &I); 9260 } 9261 } 9262 9263 void VPWidenRecipe::execute(VPTransformState &State) { 9264 auto &I = *cast<Instruction>(getUnderlyingValue()); 9265 auto &Builder = State.Builder; 9266 switch (I.getOpcode()) { 9267 case Instruction::Call: 9268 case Instruction::Br: 9269 case Instruction::PHI: 9270 case Instruction::GetElementPtr: 9271 case Instruction::Select: 9272 llvm_unreachable("This instruction is handled by a different recipe."); 9273 case Instruction::UDiv: 9274 case Instruction::SDiv: 9275 case Instruction::SRem: 9276 case Instruction::URem: 9277 case Instruction::Add: 9278 case Instruction::FAdd: 9279 case Instruction::Sub: 9280 case Instruction::FSub: 9281 case Instruction::FNeg: 9282 case Instruction::Mul: 9283 case Instruction::FMul: 9284 case Instruction::FDiv: 9285 case Instruction::FRem: 9286 case Instruction::Shl: 9287 case Instruction::LShr: 9288 case Instruction::AShr: 9289 case Instruction::And: 9290 case Instruction::Or: 9291 case Instruction::Xor: { 9292 // Just widen unops and binops. 9293 State.ILV->setDebugLocFromInst(&I); 9294 9295 for (unsigned Part = 0; Part < State.UF; ++Part) { 9296 SmallVector<Value *, 2> Ops; 9297 for (VPValue *VPOp : operands()) 9298 Ops.push_back(State.get(VPOp, Part)); 9299 9300 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9301 9302 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9303 VecOp->copyIRFlags(&I); 9304 9305 // If the instruction is vectorized and was in a basic block that needed 9306 // predication, we can't propagate poison-generating flags (nuw/nsw, 9307 // exact, etc.). The control flow has been linearized and the 9308 // instruction is no longer guarded by the predicate, which could make 9309 // the flag properties to no longer hold. 9310 if (State.MayGeneratePoisonRecipes.contains(this)) 9311 VecOp->dropPoisonGeneratingFlags(); 9312 } 9313 9314 // Use this vector value for all users of the original instruction. 9315 State.set(this, V, Part); 9316 State.ILV->addMetadata(V, &I); 9317 } 9318 9319 break; 9320 } 9321 case Instruction::ICmp: 9322 case Instruction::FCmp: { 9323 // Widen compares. Generate vector compares. 9324 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9325 auto *Cmp = cast<CmpInst>(&I); 9326 State.ILV->setDebugLocFromInst(Cmp); 9327 for (unsigned Part = 0; Part < State.UF; ++Part) { 9328 Value *A = State.get(getOperand(0), Part); 9329 Value *B = State.get(getOperand(1), Part); 9330 Value *C = nullptr; 9331 if (FCmp) { 9332 // Propagate fast math flags. 
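    // The guard restores the builder's previous fast-math flags when it goes
    // out of scope, so only the FCmp created here inherits the flags of the
    // original compare.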
9333 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9334 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9335 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9336 } else { 9337 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9338 } 9339 State.set(this, C, Part); 9340 State.ILV->addMetadata(C, &I); 9341 } 9342 9343 break; 9344 } 9345 9346 case Instruction::ZExt: 9347 case Instruction::SExt: 9348 case Instruction::FPToUI: 9349 case Instruction::FPToSI: 9350 case Instruction::FPExt: 9351 case Instruction::PtrToInt: 9352 case Instruction::IntToPtr: 9353 case Instruction::SIToFP: 9354 case Instruction::UIToFP: 9355 case Instruction::Trunc: 9356 case Instruction::FPTrunc: 9357 case Instruction::BitCast: { 9358 auto *CI = cast<CastInst>(&I); 9359 State.ILV->setDebugLocFromInst(CI); 9360 9361 /// Vectorize casts. 9362 Type *DestTy = (State.VF.isScalar()) 9363 ? CI->getType() 9364 : VectorType::get(CI->getType(), State.VF); 9365 9366 for (unsigned Part = 0; Part < State.UF; ++Part) { 9367 Value *A = State.get(getOperand(0), Part); 9368 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9369 State.set(this, Cast, Part); 9370 State.ILV->addMetadata(Cast, &I); 9371 } 9372 break; 9373 } 9374 default: 9375 // This instruction is not vectorized by simple widening. 9376 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9377 llvm_unreachable("Unhandled instruction!"); 9378 } // end of switch. 9379 } 9380 9381 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9382 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9383 // Construct a vector GEP by widening the operands of the scalar GEP as 9384 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9385 // results in a vector of pointers when at least one operand of the GEP 9386 // is vector-typed. Thus, to keep the representation compact, we only use 9387 // vector-typed operands for loop-varying values. 9388 9389 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9390 // If we are vectorizing, but the GEP has only loop-invariant operands, 9391 // the GEP we build (by only using vector-typed operands for 9392 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9393 // produce a vector of pointers, we need to either arbitrarily pick an 9394 // operand to broadcast, or broadcast a clone of the original GEP. 9395 // Here, we broadcast a clone of the original. 9396 // 9397 // TODO: If at some point we decide to scalarize instructions having 9398 // loop-invariant operands, this special case will no longer be 9399 // required. We would add the scalarization decision to 9400 // collectLoopScalars() and teach getVectorValue() to broadcast 9401 // the lane-zero scalar value. 9402 auto *Clone = State.Builder.Insert(GEP->clone()); 9403 for (unsigned Part = 0; Part < State.UF; ++Part) { 9404 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9405 State.set(this, EntryPart, Part); 9406 State.ILV->addMetadata(EntryPart, GEP); 9407 } 9408 } else { 9409 // If the GEP has at least one loop-varying operand, we are sure to 9410 // produce a vector of pointers. But if we are only unrolling, we want 9411 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9412 // produce with the code below will be scalar (if VF == 1) or vector 9413 // (otherwise). Note that for the unroll-only case, we still maintain 9414 // values in the vector mapping with initVector, as we do for other 9415 // instructions. 
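    // E.g., a GEP indexed by the widened induction variable produces, for each
    // part, a GEP with a vector index and therefore a vector-of-pointers
    // result, while loop-invariant operands are kept scalar.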
9416 for (unsigned Part = 0; Part < State.UF; ++Part) { 9417 // The pointer operand of the new GEP. If it's loop-invariant, we 9418 // won't broadcast it. 9419 auto *Ptr = IsPtrLoopInvariant 9420 ? State.get(getOperand(0), VPIteration(0, 0)) 9421 : State.get(getOperand(0), Part); 9422 9423 // Collect all the indices for the new GEP. If any index is 9424 // loop-invariant, we won't broadcast it. 9425 SmallVector<Value *, 4> Indices; 9426 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9427 VPValue *Operand = getOperand(I); 9428 if (IsIndexLoopInvariant[I - 1]) 9429 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9430 else 9431 Indices.push_back(State.get(Operand, Part)); 9432 } 9433 9434 // If the GEP instruction is vectorized and was in a basic block that 9435 // needed predication, we can't propagate the poison-generating 'inbounds' 9436 // flag. The control flow has been linearized and the GEP is no longer 9437 // guarded by the predicate, which could make the 'inbounds' properties to 9438 // no longer hold. 9439 bool IsInBounds = 9440 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9441 9442 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9443 // but it should be a vector, otherwise. 9444 auto *NewGEP = IsInBounds 9445 ? State.Builder.CreateInBoundsGEP( 9446 GEP->getSourceElementType(), Ptr, Indices) 9447 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9448 Ptr, Indices); 9449 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9450 "NewGEP is not a pointer vector"); 9451 State.set(this, NewGEP, Part); 9452 State.ILV->addMetadata(NewGEP, GEP); 9453 } 9454 } 9455 } 9456 9457 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9458 assert(!State.Instance && "Int or FP induction being replicated."); 9459 9460 Value *Start = getStartValue()->getLiveInIRValue(); 9461 const InductionDescriptor &ID = getInductionDescriptor(); 9462 TruncInst *Trunc = getTruncInst(); 9463 IRBuilderBase &Builder = State.Builder; 9464 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9465 assert(State.VF.isVector() && "must have vector VF"); 9466 9467 // The value from the original loop to which we are mapping the new induction 9468 // variable. 9469 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9470 9471 // Fast-math-flags propagate from the original induction instruction. 9472 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9473 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9474 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9475 9476 // Now do the actual transformations, and start with fetching the step value. 
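  // The step is loop-invariant, so a single scalar value (part 0, lane 0) is
  // sufficient.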
9477 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9478 9479 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9480 "Expected either an induction phi-node or a truncate of it!"); 9481 9482 // Construct the initial value of the vector IV in the vector loop preheader 9483 auto CurrIP = Builder.saveIP(); 9484 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9485 Builder.SetInsertPoint(VectorPH->getTerminator()); 9486 if (isa<TruncInst>(EntryVal)) { 9487 assert(Start->getType()->isIntegerTy() && 9488 "Truncation requires an integer type"); 9489 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9490 Step = Builder.CreateTrunc(Step, TruncType); 9491 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9492 } 9493 9494 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9495 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9496 Value *SteppedStart = getStepVector( 9497 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9498 9499 // We create vector phi nodes for both integer and floating-point induction 9500 // variables. Here, we determine the kind of arithmetic we will perform. 9501 Instruction::BinaryOps AddOp; 9502 Instruction::BinaryOps MulOp; 9503 if (Step->getType()->isIntegerTy()) { 9504 AddOp = Instruction::Add; 9505 MulOp = Instruction::Mul; 9506 } else { 9507 AddOp = ID.getInductionOpcode(); 9508 MulOp = Instruction::FMul; 9509 } 9510 9511 // Multiply the vectorization factor by the step using integer or 9512 // floating-point arithmetic as appropriate. 9513 Type *StepType = Step->getType(); 9514 Value *RuntimeVF; 9515 if (Step->getType()->isFloatingPointTy()) 9516 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9517 else 9518 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9519 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9520 9521 // Create a vector splat to use in the induction update. 9522 // 9523 // FIXME: If the step is non-constant, we create the vector splat with 9524 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9525 // handle a constant vector splat. 9526 Value *SplatVF = isa<Constant>(Mul) 9527 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9528 : Builder.CreateVectorSplat(State.VF, Mul); 9529 Builder.restoreIP(CurrIP); 9530 9531 // We may need to add the step a number of times, depending on the unroll 9532 // factor. The last of those goes into the PHI. 9533 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9534 &*State.CFG.PrevBB->getFirstInsertionPt()); 9535 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9536 Instruction *LastInduction = VecInd; 9537 for (unsigned Part = 0; Part < State.UF; ++Part) { 9538 State.set(this, LastInduction, Part); 9539 9540 if (isa<TruncInst>(EntryVal)) 9541 State.ILV->addMetadata(LastInduction, EntryVal); 9542 9543 LastInduction = cast<Instruction>( 9544 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9545 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9546 } 9547 9548 LastInduction->setName("vec.ind.next"); 9549 VecInd->addIncoming(SteppedStart, VectorPH); 9550 // Add induction update using an incorrect block temporarily. The phi node 9551 // will be fixed after VPlan execution. Note that at this point the latch 9552 // block cannot be used, as it does not exist yet. 9553 // TODO: Model increment value in VPlan, by turning the recipe into a 9554 // multi-def and a subclass of VPHeaderPHIRecipe. 
9555 VecInd->addIncoming(LastInduction, VectorPH); 9556 } 9557 9558 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9559 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9560 "Not a pointer induction according to InductionDescriptor!"); 9561 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9562 "Unexpected type."); 9563 9564 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9565 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9566 9567 if (all_of(users(), [this](const VPUser *U) { 9568 return cast<VPRecipeBase>(U)->usesScalars(this); 9569 })) { 9570 // This is the normalized GEP that starts counting at zero. 9571 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9572 CanonicalIV, IndDesc.getStep()->getType()); 9573 // Determine the number of scalars we need to generate for each unroll 9574 // iteration. If the instruction is uniform, we only need to generate the 9575 // first lane. Otherwise, we generate all VF values. 9576 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9577 assert((IsUniform || !State.VF.isScalable()) && 9578 "Cannot scalarize a scalable VF"); 9579 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9580 9581 for (unsigned Part = 0; Part < State.UF; ++Part) { 9582 Value *PartStart = 9583 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9584 9585 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9586 Value *Idx = State.Builder.CreateAdd( 9587 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9588 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9589 9590 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9591 State.CFG.PrevBB->getTerminator()); 9592 Value *SclrGep = emitTransformedIndex( 9593 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9594 SclrGep->setName("next.gep"); 9595 State.set(this, SclrGep, VPIteration(Part, Lane)); 9596 } 9597 } 9598 return; 9599 } 9600 9601 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9602 "Induction step not a SCEV constant!"); 9603 Type *PhiType = IndDesc.getStep()->getType(); 9604 9605 // Build a pointer phi 9606 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9607 Type *ScStValueType = ScalarStartValue->getType(); 9608 PHINode *NewPointerPhi = 9609 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9610 9611 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9612 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH); 9613 9614 // A pointer induction, performed by using a gep 9615 const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout(); 9616 Instruction *InductionLoc = &*State.Builder.GetInsertPoint(); 9617 9618 const SCEV *ScalarStep = IndDesc.getStep(); 9619 SCEVExpander Exp(SE, DL, "induction"); 9620 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9621 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9622 Value *NumUnrolledElems = 9623 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9624 Value *InductionGEP = GetElementPtrInst::Create( 9625 IndDesc.getElementType(), NewPointerPhi, 9626 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9627 InductionLoc); 9628 // Add induction update using an incorrect block temporarily. The phi node 9629 // will be fixed after VPlan execution. Note that at this point the latch 9630 // block cannot be used, as it does not exist yet. 
9631 // TODO: Model increment value in VPlan, by turning the recipe into a 9632 // multi-def and a subclass of VPHeaderPHIRecipe. 9633 NewPointerPhi->addIncoming(InductionGEP, VectorPH); 9634 9635 // Create UF many actual address geps that use the pointer 9636 // phi as base and a vectorized version of the step value 9637 // (<step*0, ..., step*N>) as offset. 9638 for (unsigned Part = 0; Part < State.UF; ++Part) { 9639 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9640 Value *StartOffsetScalar = 9641 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9642 Value *StartOffset = 9643 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9644 // Create a vector of consecutive numbers from zero to VF. 9645 StartOffset = State.Builder.CreateAdd( 9646 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9647 9648 Value *GEP = State.Builder.CreateGEP( 9649 IndDesc.getElementType(), NewPointerPhi, 9650 State.Builder.CreateMul( 9651 StartOffset, 9652 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9653 "vector.gep")); 9654 State.set(this, GEP, Part); 9655 } 9656 } 9657 9658 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9659 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9660 9661 // Fast-math-flags propagate from the original induction instruction. 9662 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9663 if (IndDesc.getInductionBinOp() && 9664 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9665 State.Builder.setFastMathFlags( 9666 IndDesc.getInductionBinOp()->getFastMathFlags()); 9667 9668 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9669 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9670 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9671 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9672 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9673 ScalarIV = 9674 Ty->isIntegerTy() 9675 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9676 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9677 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9678 getStartValue()->getLiveInIRValue(), Step, 9679 IndDesc); 9680 ScalarIV->setName("offset.idx"); 9681 } 9682 if (TruncToTy) { 9683 assert(Step->getType()->isIntegerTy() && 9684 "Truncation requires an integer step"); 9685 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9686 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9687 } 9688 return ScalarIV; 9689 }; 9690 9691 Value *ScalarIV = CreateScalarIV(Step); 9692 if (State.VF.isVector()) { 9693 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9694 return; 9695 } 9696 9697 for (unsigned Part = 0; Part < State.UF; ++Part) { 9698 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9699 Value *EntryPart; 9700 if (Step->getType()->isFloatingPointTy()) { 9701 Value *StartIdx = 9702 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9703 // Floating-point operations inherit FMF via the builder's flags. 
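      // Illustrative example (assumed values): for an fadd induction with a
      // scalar VF, UF = 4 and Step = 2.0, the parts evaluate to ScalarIV + 0.0,
      // ScalarIV + 2.0, ScalarIV + 4.0 and ScalarIV + 6.0 respectively.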
9704 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9705 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9706 ScalarIV, MulOp); 9707 } else { 9708 Value *StartIdx = 9709 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9710 EntryPart = State.Builder.CreateAdd( 9711 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9712 } 9713 State.set(this, EntryPart, Part); 9714 } 9715 } 9716 9717 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9718 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9719 State); 9720 } 9721 9722 void VPBlendRecipe::execute(VPTransformState &State) { 9723 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9724 // We know that all PHIs in non-header blocks are converted into 9725 // selects, so we don't have to worry about the insertion order and we 9726 // can just use the builder. 9727 // At this point we generate the predication tree. There may be 9728 // duplications since this is a simple recursive scan, but future 9729 // optimizations will clean it up. 9730 9731 unsigned NumIncoming = getNumIncomingValues(); 9732 9733 // Generate a sequence of selects of the form: 9734 // SELECT(Mask3, In3, 9735 // SELECT(Mask2, In2, 9736 // SELECT(Mask1, In1, 9737 // In0))) 9738 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9739 // are essentially undef are taken from In0. 9740 InnerLoopVectorizer::VectorParts Entry(State.UF); 9741 for (unsigned In = 0; In < NumIncoming; ++In) { 9742 for (unsigned Part = 0; Part < State.UF; ++Part) { 9743 // We might have single edge PHIs (blocks) - use an identity 9744 // 'select' for the first PHI operand. 9745 Value *In0 = State.get(getIncomingValue(In), Part); 9746 if (In == 0) 9747 Entry[Part] = In0; // Initialize with the first incoming value. 9748 else { 9749 // Select between the current value and the previous incoming edge 9750 // based on the incoming mask. 9751 Value *Cond = State.get(getMask(In), Part); 9752 Entry[Part] = 9753 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9754 } 9755 } 9756 } 9757 for (unsigned Part = 0; Part < State.UF; ++Part) 9758 State.set(this, Entry[Part], Part); 9759 } 9760 9761 void VPInterleaveRecipe::execute(VPTransformState &State) { 9762 assert(!State.Instance && "Interleave group being replicated."); 9763 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9764 getStoredValues(), getMask()); 9765 } 9766 9767 void VPReductionRecipe::execute(VPTransformState &State) { 9768 assert(!State.Instance && "Reduction being replicated."); 9769 Value *PrevInChain = State.get(getChainOp(), 0); 9770 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9771 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9772 // Propagate the fast-math flags carried by the underlying instruction. 
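  // (Because of this, the floating-point operations the builder creates below
  // inherit the reduction's fast-math flags.)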
9773   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9774   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9775   for (unsigned Part = 0; Part < State.UF; ++Part) {
9776     Value *NewVecOp = State.get(getVecOp(), Part);
9777     if (VPValue *Cond = getCondOp()) {
9778       Value *NewCond = State.get(Cond, Part);
9779       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9780       Value *Iden = RdxDesc->getRecurrenceIdentity(
9781           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9782       Value *IdenVec =
9783           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9784       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9785       NewVecOp = Select;
9786     }
9787     Value *NewRed;
9788     Value *NextInChain;
9789     if (IsOrdered) {
9790       if (State.VF.isVector())
9791         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9792                                         PrevInChain);
9793       else
9794         NewRed = State.Builder.CreateBinOp(
9795             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9796             NewVecOp);
9797       PrevInChain = NewRed;
9798     } else {
9799       PrevInChain = State.get(getChainOp(), Part);
9800       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9801     }
9802     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9803       NextInChain =
9804           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9805                          NewRed, PrevInChain);
9806     } else if (IsOrdered)
9807       NextInChain = NewRed;
9808     else
9809       NextInChain = State.Builder.CreateBinOp(
9810           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9811           PrevInChain);
9812     State.set(this, NextInChain, Part);
9813   }
9814 }
9815
9816 void VPReplicateRecipe::execute(VPTransformState &State) {
9817   if (State.Instance) { // Generate a single instance.
9818     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9819     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9820                                     IsPredicated, State);
9821     // Insert scalar instance packing it into a vector.
9822     if (AlsoPack && State.VF.isVector()) {
9823       // If we're constructing lane 0, initialize to start from poison.
9824       if (State.Instance->Lane.isFirstLane()) {
9825         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9826         Value *Poison = PoisonValue::get(
9827             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9828         State.set(this, Poison, State.Instance->Part);
9829       }
9830       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9831     }
9832     return;
9833   }
9834
9835   // Generate scalar instances for all VF lanes of all UF parts, unless the
9836   // instruction is uniform, in which case generate only the first lane for each
9837   // of the UF parts.
9838   unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 9839 assert((!State.VF.isScalable() || IsUniform) && 9840 "Can't scalarize a scalable vector"); 9841 for (unsigned Part = 0; Part < State.UF; ++Part) 9842 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9843 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9844 VPIteration(Part, Lane), IsPredicated, 9845 State); 9846 } 9847 9848 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9849 assert(State.Instance && "Branch on Mask works only on single instance."); 9850 9851 unsigned Part = State.Instance->Part; 9852 unsigned Lane = State.Instance->Lane.getKnownLane(); 9853 9854 Value *ConditionBit = nullptr; 9855 VPValue *BlockInMask = getMask(); 9856 if (BlockInMask) { 9857 ConditionBit = State.get(BlockInMask, Part); 9858 if (ConditionBit->getType()->isVectorTy()) 9859 ConditionBit = State.Builder.CreateExtractElement( 9860 ConditionBit, State.Builder.getInt32(Lane)); 9861 } else // Block in mask is all-one. 9862 ConditionBit = State.Builder.getTrue(); 9863 9864 // Replace the temporary unreachable terminator with a new conditional branch, 9865 // whose two destinations will be set later when they are created. 9866 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9867 assert(isa<UnreachableInst>(CurrentTerminator) && 9868 "Expected to replace unreachable terminator with conditional branch."); 9869 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9870 CondBr->setSuccessor(0, nullptr); 9871 ReplaceInstWithInst(CurrentTerminator, CondBr); 9872 } 9873 9874 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9875 assert(State.Instance && "Predicated instruction PHI works per instance."); 9876 Instruction *ScalarPredInst = 9877 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9878 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9879 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9880 assert(PredicatingBB && "Predicated block has no single predecessor."); 9881 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9882 "operand must be VPReplicateRecipe"); 9883 9884 // By current pack/unpack logic we need to generate only a single phi node: if 9885 // a vector value for the predicated instruction exists at this point it means 9886 // the instruction has vector users only, and a phi for the vector value is 9887 // needed. In this case the recipe of the predicated instruction is marked to 9888 // also do that packing, thereby "hoisting" the insert-element sequence. 9889 // Otherwise, a phi node for the scalar value is needed. 9890 unsigned Part = State.Instance->Part; 9891 if (State.hasVectorValue(getOperand(0), Part)) { 9892 Value *VectorValue = State.get(getOperand(0), Part); 9893 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9894 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9895 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9896 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9897 if (State.hasVectorValue(this, Part)) 9898 State.reset(this, VPhi, Part); 9899 else 9900 State.set(this, VPhi, Part); 9901 // NOTE: Currently we need to update the value of the operand, so the next 9902 // predicated iteration inserts its generated value in the correct vector. 
9903 State.reset(getOperand(0), VPhi, Part); 9904 } else { 9905 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9906 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9907 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9908 PredicatingBB); 9909 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9910 if (State.hasScalarValue(this, *State.Instance)) 9911 State.reset(this, Phi, *State.Instance); 9912 else 9913 State.set(this, Phi, *State.Instance); 9914 // NOTE: Currently we need to update the value of the operand, so the next 9915 // predicated iteration inserts its generated value in the correct vector. 9916 State.reset(getOperand(0), Phi, *State.Instance); 9917 } 9918 } 9919 9920 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9921 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9922 9923 // Attempt to issue a wide load. 9924 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9925 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9926 9927 assert((LI || SI) && "Invalid Load/Store instruction"); 9928 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9929 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9930 9931 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9932 9933 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9934 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9935 bool CreateGatherScatter = !Consecutive; 9936 9937 auto &Builder = State.Builder; 9938 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9939 bool isMaskRequired = getMask(); 9940 if (isMaskRequired) 9941 for (unsigned Part = 0; Part < State.UF; ++Part) 9942 BlockInMaskParts[Part] = State.get(getMask(), Part); 9943 9944 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9945 // Calculate the pointer for the specific unroll-part. 9946 GetElementPtrInst *PartPtr = nullptr; 9947 9948 bool InBounds = false; 9949 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9950 InBounds = gep->isInBounds(); 9951 if (Reverse) { 9952 // If the address is consecutive but reversed, then the 9953 // wide store needs to start at the last vector element. 9954 // RunTimeVF = VScale * VF.getKnownMinValue() 9955 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9956 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9957 // NumElt = -Part * RunTimeVF 9958 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9959 // LastLane = 1 - RunTimeVF 9960 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9961 PartPtr = 9962 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9963 PartPtr->setIsInBounds(InBounds); 9964 PartPtr = cast<GetElementPtrInst>( 9965 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9966 PartPtr->setIsInBounds(InBounds); 9967 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
9968 BlockInMaskParts[Part] = 9969 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9970 } else { 9971 Value *Increment = 9972 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9973 PartPtr = cast<GetElementPtrInst>( 9974 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9975 PartPtr->setIsInBounds(InBounds); 9976 } 9977 9978 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9979 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9980 }; 9981 9982 // Handle Stores: 9983 if (SI) { 9984 State.ILV->setDebugLocFromInst(SI); 9985 9986 for (unsigned Part = 0; Part < State.UF; ++Part) { 9987 Instruction *NewSI = nullptr; 9988 Value *StoredVal = State.get(StoredValue, Part); 9989 if (CreateGatherScatter) { 9990 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9991 Value *VectorGep = State.get(getAddr(), Part); 9992 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9993 MaskPart); 9994 } else { 9995 if (Reverse) { 9996 // If we store to reverse consecutive memory locations, then we need 9997 // to reverse the order of elements in the stored value. 9998 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 9999 // We don't want to update the value in the map as it might be used in 10000 // another expression. So don't call resetVectorValue(StoredVal). 10001 } 10002 auto *VecPtr = 10003 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10004 if (isMaskRequired) 10005 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10006 BlockInMaskParts[Part]); 10007 else 10008 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10009 } 10010 State.ILV->addMetadata(NewSI, SI); 10011 } 10012 return; 10013 } 10014 10015 // Handle loads. 10016 assert(LI && "Must have a load instruction"); 10017 State.ILV->setDebugLocFromInst(LI); 10018 for (unsigned Part = 0; Part < State.UF; ++Part) { 10019 Value *NewLI; 10020 if (CreateGatherScatter) { 10021 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10022 Value *VectorGep = State.get(getAddr(), Part); 10023 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10024 nullptr, "wide.masked.gather"); 10025 State.ILV->addMetadata(NewLI, LI); 10026 } else { 10027 auto *VecPtr = 10028 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10029 if (isMaskRequired) 10030 NewLI = Builder.CreateMaskedLoad( 10031 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10032 PoisonValue::get(DataTy), "wide.masked.load"); 10033 else 10034 NewLI = 10035 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10036 10037 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10038 State.ILV->addMetadata(NewLI, LI); 10039 if (Reverse) 10040 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10041 } 10042 10043 State.set(this, NewLI, Part); 10044 } 10045 } 10046 10047 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10048 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10049 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10050 // for predication. 
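// For illustration: a function compiled for minimum size typically ends up
// with CM_ScalarEpilogueNotAllowedOptSize, a loop carrying an enabled
// predication hint maps to CM_ScalarEpilogueNotNeededUsePredicate, and the
// default answer is CM_ScalarEpilogueAllowed.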
10051 static ScalarEpilogueLowering getScalarEpilogueLowering( 10052 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10053 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10054 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10055 LoopVectorizationLegality &LVL) { 10056 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10057 // don't look at hints or options, and don't request a scalar epilogue. 10058 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10059 // LoopAccessInfo (due to code dependency and not being able to reliably get 10060 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10061 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10062 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10063 // back to the old way and vectorize with versioning when forced. See D81345.) 10064 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10065 PGSOQueryType::IRPass) && 10066 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10067 return CM_ScalarEpilogueNotAllowedOptSize; 10068 10069 // 2) If set, obey the directives 10070 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10071 switch (PreferPredicateOverEpilogue) { 10072 case PreferPredicateTy::ScalarEpilogue: 10073 return CM_ScalarEpilogueAllowed; 10074 case PreferPredicateTy::PredicateElseScalarEpilogue: 10075 return CM_ScalarEpilogueNotNeededUsePredicate; 10076 case PreferPredicateTy::PredicateOrDontVectorize: 10077 return CM_ScalarEpilogueNotAllowedUsePredicate; 10078 }; 10079 } 10080 10081 // 3) If set, obey the hints 10082 switch (Hints.getPredicate()) { 10083 case LoopVectorizeHints::FK_Enabled: 10084 return CM_ScalarEpilogueNotNeededUsePredicate; 10085 case LoopVectorizeHints::FK_Disabled: 10086 return CM_ScalarEpilogueAllowed; 10087 }; 10088 10089 // 4) if the TTI hook indicates this is profitable, request predication. 10090 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10091 LVL.getLAI())) 10092 return CM_ScalarEpilogueNotNeededUsePredicate; 10093 10094 return CM_ScalarEpilogueAllowed; 10095 } 10096 10097 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10098 // If Values have been set for this Def return the one relevant for \p Part. 10099 if (hasVectorValue(Def, Part)) 10100 return Data.PerPartOutput[Def][Part]; 10101 10102 if (!hasScalarValue(Def, {Part, 0})) { 10103 Value *IRV = Def->getLiveInIRValue(); 10104 Value *B = ILV->getBroadcastInstrs(IRV); 10105 set(Def, B, Part); 10106 return B; 10107 } 10108 10109 Value *ScalarValue = get(Def, {Part, 0}); 10110 // If we aren't vectorizing, we can just copy the scalar map values over 10111 // to the vector map. 10112 if (VF.isScalar()) { 10113 set(Def, ScalarValue, Part); 10114 return ScalarValue; 10115 } 10116 10117 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10118 bool IsUniform = RepR && RepR->isUniform(); 10119 10120 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10121 // Check if there is a scalar value for the selected lane. 10122 if (!hasScalarValue(Def, {Part, LastLane})) { 10123 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
10124 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) || 10125 isa<VPScalarIVStepsRecipe>(Def->getDef())) && 10126 "unexpected recipe found to be invariant"); 10127 IsUniform = true; 10128 LastLane = 0; 10129 } 10130 10131 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10132 // Set the insert point after the last scalarized instruction or after the 10133 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10134 // will directly follow the scalar definitions. 10135 auto OldIP = Builder.saveIP(); 10136 auto NewIP = 10137 isa<PHINode>(LastInst) 10138 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10139 : std::next(BasicBlock::iterator(LastInst)); 10140 Builder.SetInsertPoint(&*NewIP); 10141 10142 // However, if we are vectorizing, we need to construct the vector values. 10143 // If the value is known to be uniform after vectorization, we can just 10144 // broadcast the scalar value corresponding to lane zero for each unroll 10145 // iteration. Otherwise, we construct the vector values using 10146 // insertelement instructions. Since the resulting vectors are stored in 10147 // State, we will only generate the insertelements once. 10148 Value *VectorValue = nullptr; 10149 if (IsUniform) { 10150 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10151 set(Def, VectorValue, Part); 10152 } else { 10153 // Initialize packing with insertelements to start from undef. 10154 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10155 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10156 set(Def, Undef, Part); 10157 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10158 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10159 VectorValue = get(Def, Part); 10160 } 10161 Builder.restoreIP(OldIP); 10162 return VectorValue; 10163 } 10164 10165 // Process the loop in the VPlan-native vectorization path. This path builds 10166 // VPlan upfront in the vectorization pipeline, which allows to apply 10167 // VPlan-to-VPlan transformations from the very beginning without modifying the 10168 // input LLVM IR. 10169 static bool processLoopInVPlanNativePath( 10170 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10171 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10172 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10173 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10174 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10175 LoopVectorizationRequirements &Requirements) { 10176 10177 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10178 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10179 return false; 10180 } 10181 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10182 Function *F = L->getHeader()->getParent(); 10183 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10184 10185 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10186 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10187 10188 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10189 &Hints, IAI); 10190 // Use the planner for outer loop vectorization. 10191 // TODO: CM is not used at this point inside the planner. Turn CM into an 10192 // optional argument if we don't need it in the future. 10193 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10194 Requirements, ORE); 10195 10196 // Get user vectorization factor. 
10197 ElementCount UserVF = Hints.getWidth(); 10198 10199 CM.collectElementTypesForWidening(); 10200 10201 // Plan how to best vectorize, return the best VF and its cost. 10202 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10203 10204 // If we are stress testing VPlan builds, do not attempt to generate vector 10205 // code. Masked vector code generation support will follow soon. 10206 // Also, do not attempt to vectorize if no vector code will be produced. 10207 if (VPlanBuildStressTest || EnableVPlanPredication || 10208 VectorizationFactor::Disabled() == VF) 10209 return false; 10210 10211 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10212 10213 { 10214 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10215 F->getParent()->getDataLayout()); 10216 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10217 &CM, BFI, PSI, Checks); 10218 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10219 << L->getHeader()->getParent()->getName() << "\"\n"); 10220 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10221 } 10222 10223 // Mark the loop as already vectorized to avoid vectorizing again. 10224 Hints.setAlreadyVectorized(); 10225 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10226 return true; 10227 } 10228 10229 // Emit a remark if there are stores to floats that required a floating point 10230 // extension. If the vectorized loop was generated with floating point there 10231 // will be a performance penalty from the conversion overhead and the change in 10232 // the vector width. 10233 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10234 SmallVector<Instruction *, 4> Worklist; 10235 for (BasicBlock *BB : L->getBlocks()) { 10236 for (Instruction &Inst : *BB) { 10237 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10238 if (S->getValueOperand()->getType()->isFloatTy()) 10239 Worklist.push_back(S); 10240 } 10241 } 10242 } 10243 10244 // Traverse the floating point stores upwards searching, for floating point 10245 // conversions. 10246 SmallPtrSet<const Instruction *, 4> Visited; 10247 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10248 while (!Worklist.empty()) { 10249 auto *I = Worklist.pop_back_val(); 10250 if (!L->contains(I)) 10251 continue; 10252 if (!Visited.insert(I).second) 10253 continue; 10254 10255 // Emit a remark if the floating point store required a floating 10256 // point conversion. 10257 // TODO: More work could be done to identify the root cause such as a 10258 // constant or a function return type and point the user to it. 10259 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10260 ORE->emit([&]() { 10261 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10262 I->getDebugLoc(), L->getHeader()) 10263 << "floating point conversion changes vector width. " 10264 << "Mixed floating point precision requires an up/down " 10265 << "cast that will negatively impact performance."; 10266 }); 10267 10268 for (Use &Op : I->operands()) 10269 if (auto *OpI = dyn_cast<Instruction>(Op)) 10270 Worklist.push_back(OpI); 10271 } 10272 } 10273 10274 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10275 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10276 !EnableLoopInterleaving), 10277 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10278 !EnableLoopVectorization) {} 10279 10280 bool LoopVectorizePass::processLoop(Loop *L) { 10281 assert((EnableVPlanNativePath || L->isInnermost()) && 10282 "VPlan-native path is not enabled. 
Only process inner loops."); 10283 10284 #ifndef NDEBUG 10285 const std::string DebugLocStr = getDebugLocString(L); 10286 #endif /* NDEBUG */ 10287 10288 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10289 << L->getHeader()->getParent()->getName() << "' from " 10290 << DebugLocStr << "\n"); 10291 10292 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10293 10294 LLVM_DEBUG( 10295 dbgs() << "LV: Loop hints:" 10296 << " force=" 10297 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10298 ? "disabled" 10299 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10300 ? "enabled" 10301 : "?")) 10302 << " width=" << Hints.getWidth() 10303 << " interleave=" << Hints.getInterleave() << "\n"); 10304 10305 // Function containing loop 10306 Function *F = L->getHeader()->getParent(); 10307 10308 // Looking at the diagnostic output is the only way to determine if a loop 10309 // was vectorized (other than looking at the IR or machine code), so it 10310 // is important to generate an optimization remark for each loop. Most of 10311 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10312 // generated as OptimizationRemark and OptimizationRemarkMissed are 10313 // less verbose reporting vectorized loops and unvectorized loops that may 10314 // benefit from vectorization, respectively. 10315 10316 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10317 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10318 return false; 10319 } 10320 10321 PredicatedScalarEvolution PSE(*SE, *L); 10322 10323 // Check if it is legal to vectorize the loop. 10324 LoopVectorizationRequirements Requirements; 10325 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10326 &Requirements, &Hints, DB, AC, BFI, PSI); 10327 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10328 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10329 Hints.emitRemarkWithHints(); 10330 return false; 10331 } 10332 10333 // Check the function attributes and profiles to find out if this function 10334 // should be optimized for size. 10335 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10336 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10337 10338 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10339 // here. They may require CFG and instruction level transformations before 10340 // even evaluating whether vectorization is profitable. Since we cannot modify 10341 // the incoming IR, we need to build VPlan upfront in the vectorization 10342 // pipeline. 10343 if (!L->isInnermost()) 10344 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10345 ORE, BFI, PSI, Hints, Requirements); 10346 10347 assert(L->isInnermost() && "Inner loop expected."); 10348 10349 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10350 // count by optimizing for size, to minimize overheads. 10351 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10352 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10353 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10354 << "This loop is worth vectorizing only if no scalar " 10355 << "iteration overheads are incurred."); 10356 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10357 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10358 else { 10359 LLVM_DEBUG(dbgs() << "\n"); 10360 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10361 } 10362 } 10363 10364 // Check the function attributes to see if implicit floats are allowed. 10365 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10366 // an integer loop and the vector instructions selected are purely integer 10367 // vector instructions? 10368 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10369 reportVectorizationFailure( 10370 "Can't vectorize when the NoImplicitFloat attribute is used", 10371 "loop not vectorized due to NoImplicitFloat attribute", 10372 "NoImplicitFloat", ORE, L); 10373 Hints.emitRemarkWithHints(); 10374 return false; 10375 } 10376 10377 // Check if the target supports potentially unsafe FP vectorization. 10378 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10379 // for the target we're vectorizing for, to make sure none of the 10380 // additional fp-math flags can help. 10381 if (Hints.isPotentiallyUnsafe() && 10382 TTI->isFPVectorizationPotentiallyUnsafe()) { 10383 reportVectorizationFailure( 10384 "Potentially unsafe FP op prevents vectorization", 10385 "loop not vectorized due to unsafe FP support.", 10386 "UnsafeFP", ORE, L); 10387 Hints.emitRemarkWithHints(); 10388 return false; 10389 } 10390 10391 bool AllowOrderedReductions; 10392 // If the flag is set, use that instead and override the TTI behaviour. 10393 if (ForceOrderedReductions.getNumOccurrences() > 0) 10394 AllowOrderedReductions = ForceOrderedReductions; 10395 else 10396 AllowOrderedReductions = TTI->enableOrderedReductions(); 10397 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10398 ORE->emit([&]() { 10399 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10400 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10401 ExactFPMathInst->getDebugLoc(), 10402 ExactFPMathInst->getParent()) 10403 << "loop not vectorized: cannot prove it is safe to reorder " 10404 "floating-point operations"; 10405 }); 10406 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10407 "reorder floating-point operations\n"); 10408 Hints.emitRemarkWithHints(); 10409 return false; 10410 } 10411 10412 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10413 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10414 10415 // If an override option has been passed in for interleaved accesses, use it. 10416 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10417 UseInterleaved = EnableInterleavedMemAccesses; 10418 10419 // Analyze interleaved memory accesses. 10420 if (UseInterleaved) { 10421 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10422 } 10423 10424 // Use the cost model. 10425 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10426 F, &Hints, IAI); 10427 CM.collectValuesToIgnore(); 10428 CM.collectElementTypesForWidening(); 10429 10430 // Use the planner for vectorization. 10431 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10432 Requirements, ORE); 10433 10434 // Get user vectorization factor and interleave count. 
10435   ElementCount UserVF = Hints.getWidth();
10436   unsigned UserIC = Hints.getInterleave();
10437
10438   // Plan how to best vectorize, return the best VF and its cost.
10439   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10440
10441   VectorizationFactor VF = VectorizationFactor::Disabled();
10442   unsigned IC = 1;
10443
10444   if (MaybeVF) {
10445     VF = *MaybeVF;
10446     // Select the interleave count.
10447     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10448   }
10449
10450   // Identify the diagnostic messages that should be produced.
10451   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10452   bool VectorizeLoop = true, InterleaveLoop = true;
10453   if (VF.Width.isScalar()) {
10454     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10455     VecDiagMsg = std::make_pair(
10456         "VectorizationNotBeneficial",
10457         "the cost-model indicates that vectorization is not beneficial");
10458     VectorizeLoop = false;
10459   }
10460
10461   if (!MaybeVF && UserIC > 1) {
10462     // Tell the user interleaving was avoided up-front, despite being explicitly
10463     // requested.
10464     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10465                          "interleaving should be avoided up front\n");
10466     IntDiagMsg = std::make_pair(
10467         "InterleavingAvoided",
10468         "Ignoring UserIC, because interleaving was avoided up front");
10469     InterleaveLoop = false;
10470   } else if (IC == 1 && UserIC <= 1) {
10471     // Tell the user interleaving is not beneficial.
10472     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10473     IntDiagMsg = std::make_pair(
10474         "InterleavingNotBeneficial",
10475         "the cost-model indicates that interleaving is not beneficial");
10476     InterleaveLoop = false;
10477     if (UserIC == 1) {
10478       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10479       IntDiagMsg.second +=
10480           " and is explicitly disabled or interleave count is set to 1";
10481     }
10482   } else if (IC > 1 && UserIC == 1) {
10483     // Tell the user interleaving is beneficial, but it is explicitly disabled.
10484     LLVM_DEBUG(
10485         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10486     IntDiagMsg = std::make_pair(
10487         "InterleavingBeneficialButDisabled",
10488         "the cost-model indicates that interleaving is beneficial "
10489         "but is explicitly disabled or interleave count is set to 1");
10490     InterleaveLoop = false;
10491   }
10492
10493   // Override IC if the user provided an interleave count.
10494   IC = UserIC > 0 ? UserIC : IC;
10495
10496   // Emit diagnostic messages, if any.
10497   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10498   if (!VectorizeLoop && !InterleaveLoop) {
10499     // Do not vectorize or interleave the loop.
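    // (Both missed-optimization remarks below can be surfaced to the user via
    // remark options such as -Rpass-missed=loop-vectorize.)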
10500 ORE->emit([&]() { 10501 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10502 L->getStartLoc(), L->getHeader()) 10503 << VecDiagMsg.second; 10504 }); 10505 ORE->emit([&]() { 10506 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10507 L->getStartLoc(), L->getHeader()) 10508 << IntDiagMsg.second; 10509 }); 10510 return false; 10511 } else if (!VectorizeLoop && InterleaveLoop) { 10512 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10513 ORE->emit([&]() { 10514 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10515 L->getStartLoc(), L->getHeader()) 10516 << VecDiagMsg.second; 10517 }); 10518 } else if (VectorizeLoop && !InterleaveLoop) { 10519 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10520 << ") in " << DebugLocStr << '\n'); 10521 ORE->emit([&]() { 10522 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10523 L->getStartLoc(), L->getHeader()) 10524 << IntDiagMsg.second; 10525 }); 10526 } else if (VectorizeLoop && InterleaveLoop) { 10527 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10528 << ") in " << DebugLocStr << '\n'); 10529 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10530 } 10531 10532 bool DisableRuntimeUnroll = false; 10533 MDNode *OrigLoopID = L->getLoopID(); 10534 { 10535 // Optimistically generate runtime checks. Drop them if they turn out to not 10536 // be profitable. Limit the scope of Checks, so the cleanup happens 10537 // immediately after vector codegeneration is done. 10538 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10539 F->getParent()->getDataLayout()); 10540 if (!VF.Width.isScalar() || IC > 1) 10541 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate()); 10542 10543 using namespace ore; 10544 if (!VectorizeLoop) { 10545 assert(IC > 1 && "interleave count should not be 1 or 0"); 10546 // If we decided that it is not legal to vectorize the loop, then 10547 // interleave it. 10548 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10549 &CM, BFI, PSI, Checks); 10550 10551 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10552 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10553 10554 ORE->emit([&]() { 10555 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10556 L->getHeader()) 10557 << "interleaved loop (interleaved count: " 10558 << NV("InterleaveCount", IC) << ")"; 10559 }); 10560 } else { 10561 // If we decided that it is *legal* to vectorize the loop, then do it. 10562 10563 // Consider vectorizing the epilogue too if it's profitable. 10564 VectorizationFactor EpilogueVF = 10565 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10566 if (EpilogueVF.Width.isVector()) { 10567 10568 // The first pass vectorizes the main loop and creates a scalar epilogue 10569 // to be vectorized by executing the plan (potentially with a different 10570 // factor) again shortly afterwards. 10571 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10572 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10573 EPI, &LVL, &CM, BFI, PSI, Checks); 10574 10575 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10576 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10577 DT); 10578 ++LoopsVectorized; 10579 10580 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10581 formLCSSARecursively(*L, *DT, LI, SE); 10582 10583 // Second pass vectorizes the epilogue and adjusts the control flow 10584 // edges from the first pass. 
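      // Illustrative shape of the final code (assuming, say, VF.Width = 8,
      // IC = 2 and EpilogueVF.Width = 4): a main vector loop handling 16
      // elements per iteration, then a VF = 4 epilogue vector loop for most
      // remaining iterations, then the scalar remainder loop.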
10585 EPI.MainLoopVF = EPI.EpilogueVF; 10586 EPI.MainLoopUF = EPI.EpilogueUF; 10587 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10588 ORE, EPI, &LVL, &CM, BFI, PSI, 10589 Checks); 10590 10591 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10592 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->setName( 10593 "vec.epilog.vector.body"); 10594 10595 // Ensure that the start values for any VPReductionPHIRecipes are 10596 // updated before vectorising the epilogue loop. 10597 VPBasicBlock *Header = 10598 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock(); 10599 for (VPRecipeBase &R : Header->phis()) { 10600 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10601 if (auto *Resume = MainILV.getReductionResumeValue( 10602 ReductionPhi->getRecurrenceDescriptor())) { 10603 VPValue *StartVal = BestEpiPlan.getOrAddExternalDef(Resume); 10604 ReductionPhi->setOperand(0, StartVal); 10605 } 10606 } 10607 } 10608 10609 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10610 DT); 10611 ++LoopsEpilogueVectorized; 10612 10613 if (!MainILV.areSafetyChecksAdded()) 10614 DisableRuntimeUnroll = true; 10615 } else { 10616 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10617 &LVL, &CM, BFI, PSI, Checks); 10618 10619 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10620 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10621 ++LoopsVectorized; 10622 10623 // Add metadata to disable runtime unrolling a scalar loop when there 10624 // are no runtime checks about strides and memory. A scalar loop that is 10625 // rarely used is not worth unrolling. 10626 if (!LB.areSafetyChecksAdded()) 10627 DisableRuntimeUnroll = true; 10628 } 10629 // Report the vectorization decision. 10630 ORE->emit([&]() { 10631 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10632 L->getHeader()) 10633 << "vectorized loop (vectorization width: " 10634 << NV("VectorizationFactor", VF.Width) 10635 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10636 }); 10637 } 10638 10639 if (ORE->allowExtraAnalysis(LV_NAME)) 10640 checkMixedPrecision(L, ORE); 10641 } 10642 10643 Optional<MDNode *> RemainderLoopID = 10644 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10645 LLVMLoopVectorizeFollowupEpilogue}); 10646 if (RemainderLoopID.hasValue()) { 10647 L->setLoopID(RemainderLoopID.getValue()); 10648 } else { 10649 if (DisableRuntimeUnroll) 10650 AddRuntimeUnrollDisableMetaData(L); 10651 10652 // Mark the loop as already vectorized to avoid vectorizing again. 10653 Hints.setAlreadyVectorized(); 10654 } 10655 10656 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10657 return true; 10658 } 10659 10660 LoopVectorizeResult LoopVectorizePass::runImpl( 10661 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10662 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10663 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10664 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10665 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10666 SE = &SE_; 10667 LI = &LI_; 10668 TTI = &TTI_; 10669 DT = &DT_; 10670 BFI = &BFI_; 10671 TLI = TLI_; 10672 AA = &AA_; 10673 AC = &AC_; 10674 GetLAA = &GetLAA_; 10675 DB = &DB_; 10676 ORE = &ORE_; 10677 PSI = PSI_; 10678 10679 // Don't attempt if 10680 // 1. the target claims to have no vector registers, and 10681 // 2. interleaving won't help ILP. 
//
10683   // The second condition is necessary because, even if the target has no
10684   // vector registers, loop vectorization may still enable scalar
10685   // interleaving.
10686   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10687       TTI->getMaxInterleaveFactor(1) < 2)
10688     return LoopVectorizeResult(false, false);
10689
10690   bool Changed = false, CFGChanged = false;
10691
10692   // The vectorizer requires loops to be in simplified form.
10693   // Since simplification may add new inner loops, it has to run before the
10694   // legality and profitability checks. This means running the loop vectorizer
10695   // will simplify all loops, regardless of whether anything ends up being
10696   // vectorized.
10697   for (auto &L : *LI)
10698     Changed |= CFGChanged |=
10699         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10700
10701   // Build up a worklist of inner-loops to vectorize. This is necessary as
10702   // the act of vectorizing or partially unrolling a loop creates new loops
10703   // and can invalidate iterators across the loops.
10704   SmallVector<Loop *, 8> Worklist;
10705
10706   for (Loop *L : *LI)
10707     collectSupportedLoops(*L, LI, ORE, Worklist);
10708
10709   LoopsAnalyzed += Worklist.size();
10710
10711   // Now walk the identified inner loops.
10712   while (!Worklist.empty()) {
10713     Loop *L = Worklist.pop_back_val();
10714
10715     // For the inner loops we actually process, form LCSSA to simplify the
10716     // transform.
10717     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10718
10719     Changed |= CFGChanged |= processLoop(L);
10720   }
10721
10722   // Process each loop nest in the function.
10723   return LoopVectorizeResult(Changed, CFGChanged);
10724 }
10725
10726 PreservedAnalyses LoopVectorizePass::run(Function &F,
10727                                          FunctionAnalysisManager &AM) {
10728   auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10729   auto &LI = AM.getResult<LoopAnalysis>(F);
10730   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10731   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10732   auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10733   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10734   auto &AA = AM.getResult<AAManager>(F);
10735   auto &AC = AM.getResult<AssumptionAnalysis>(F);
10736   auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10737   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10738
10739   auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10740   std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10741       [&](Loop &L) -> const LoopAccessInfo & {
10742     LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
10743                                       TLI, TTI, nullptr, nullptr, nullptr};
10744     return LAM.getResult<LoopAccessAnalysis>(L, AR);
10745   };
10746   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10747   ProfileSummaryInfo *PSI =
10748       MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10749   LoopVectorizeResult Result =
10750       runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10751   if (!Result.MadeAnyChange)
10752     return PreservedAnalyses::all();
10753   PreservedAnalyses PA;
10754
10755   // We currently do not preserve loopinfo/dominator analyses with outer loop
10756   // vectorization. Until this is addressed, mark these analyses as preserved
10757   // only for the non-VPlan-native path.
10758   // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10759   if (!EnableVPlanNativePath) {
10760     PA.preserve<LoopAnalysis>();
10761     PA.preserve<DominatorTreeAnalysis>();
10762   }
10763
10764   if (Result.MadeCFGChange) {
10765     // Making CFG changes likely means a loop got vectorized. Indicate that
10766     // extra simplification passes should be run.
10767     // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
10768     // be run if runtime checks have been added.
10769     AM.getResult<ShouldRunExtraVectorPasses>(F);
10770     PA.preserve<ShouldRunExtraVectorPasses>();
10771   } else {
10772     PA.preserveSet<CFGAnalyses>();
10773   }
10774   return PA;
10775 }
10776
10777 void LoopVectorizePass::printPipeline(
10778     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10779   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10780       OS, MapClassName2PassName);
10781
10782   OS << "<";
10783   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10784   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10785   OS << ">";
10786 }
10787
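// Illustrative example of the resulting pipeline text with the default
// options: loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only>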