//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
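// As a rough illustration (C-like pseudocode, not the actual output of this
// pass), a scalar loop such as
//
//   for (i = 0; i < n; i += 1)
//     A[i] = B[i] + C[i];
//
// is conceptually rewritten so that each vector iteration handles VF
// consecutive elements using wide loads, a wide add and a wide store:
//
//   for (i = 0; i + VF <= n; i += VF)
//     A[i:i+VF] = B[i:i+VF] + C[i:i+VF];
//
// with the remaining iterations executed by a scalar epilogue loop or folded
// into the vector body under a mask (tail folding).
//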
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
// that predication is preferred, and this lists all options. I.e., the
// vectorizer will try to fold the tail-loop (epilogue) into the vector body
// and predicate the instructions accordingly. If tail-folding fails, there are
// different fallback strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
Mostly " 278 "useful for getting consistent testing.")); 279 280 static cl::opt<bool> ForceTargetSupportsScalableVectors( 281 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 282 cl::desc( 283 "Pretend that scalable vectors are supported, even if the target does " 284 "not support them. This flag should only be used for testing.")); 285 286 static cl::opt<unsigned> SmallLoopCost( 287 "small-loop-cost", cl::init(20), cl::Hidden, 288 cl::desc( 289 "The cost of a loop that is considered 'small' by the interleaver.")); 290 291 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 292 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 293 cl::desc("Enable the use of the block frequency analysis to access PGO " 294 "heuristics minimizing code growth in cold regions and being more " 295 "aggressive in hot regions.")); 296 297 // Runtime interleave loops for load/store throughput. 298 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 299 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 300 cl::desc( 301 "Enable runtime interleaving until load/store ports are saturated")); 302 303 /// Interleave small loops with scalar reductions. 304 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 305 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 306 cl::desc("Enable interleaving for loops with small iteration counts that " 307 "contain scalar reductions to expose ILP.")); 308 309 /// The number of stores in a loop that are allowed to need predication. 310 static cl::opt<unsigned> NumberOfStoresToPredicate( 311 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 312 cl::desc("Max number of stores to be predicated behind an if.")); 313 314 static cl::opt<bool> EnableIndVarRegisterHeur( 315 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 316 cl::desc("Count the induction variable only once when interleaving")); 317 318 static cl::opt<bool> EnableCondStoresVectorization( 319 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 320 cl::desc("Enable if predication of stores during vectorization.")); 321 322 static cl::opt<unsigned> MaxNestedScalarReductionIC( 323 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 324 cl::desc("The maximum interleave count to use when interleaving a scalar " 325 "reduction in a nested loop.")); 326 327 static cl::opt<bool> 328 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 329 cl::Hidden, 330 cl::desc("Prefer in-loop vector reductions, " 331 "overriding the targets preference.")); 332 333 static cl::opt<bool> ForceOrderedReductions( 334 "force-ordered-reductions", cl::init(false), cl::Hidden, 335 cl::desc("Enable the vectorisation of loops with in-order (strict) " 336 "FP reductions")); 337 338 static cl::opt<bool> PreferPredicatedReductionSelect( 339 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 340 cl::desc( 341 "Prefer predicating a reduction operation over an after loop select.")); 342 343 cl::opt<bool> EnableVPlanNativePath( 344 "enable-vplan-native-path", cl::init(false), cl::Hidden, 345 cl::desc("Enable VPlan-native vectorization path with " 346 "support for outer loop vectorization.")); 347 348 // FIXME: Remove this switch once we have divergence analysis. Currently we 349 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
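
// For example (illustrative; exact sizes depend on the DataLayout): an i32 has
// both a type size and an alloc size of 32 bits and is therefore "regular",
// whereas x86_fp80 typically has an 80-bit type size but a 96- or 128-bit
// alloc size, so an array of x86_fp80 contains padding and cannot be treated
// as a flat vector of its elements.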

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;
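
  // Illustrative example (not normative): with UF = 2 and VF = 4, a single
  // i32 value from the original loop is represented by a VectorParts holding
  // two <4 x i32> values, one per unrolled copy of the vector loop body.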

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None, the class member Builder is used instead.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison. Those
  /// recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};
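
// A rough sketch of the control flow produced when both the main loop and its
// epilogue are vectorized (the factors below are only an example):
//
//   iteration-count and safety checks -> main vector loop (e.g. VF=8)
//                                     -> epilogue vector loop (e.g. VF=4)
//                                     -> scalar remainder loop
//
// The classes below share the state needed to build this skeleton in two
// passes over the plan.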

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(
      BasicBlock *Bypass,
      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}
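
// For example (illustrative values): with a scalable VF of <vscale x 4> and
// Step == 2, createStepForVF returns an expression equivalent to "vscale * 8";
// with a fixed VF of 4 it simply folds to the constant 8.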

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
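      // (Poison-generating flags are attributes such as nuw/nsw on integer
      // arithmetic, exact on divisions/shifts, or inbounds on GEPs; see
      // Instruction::hasPoisonGeneratingFlags().)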
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};
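
// As a rough illustration of how these values relate to user-facing controls
// (the exact mapping lives in the cost model's setup code): optimizing the
// function for size typically yields CM_ScalarEpilogueNotAllowedOptSize, while
// a predication-style loop hint such as "vectorize_predicate(enable)" typically
// yields CM_ScalarEpilogueNotNeededUsePredicate.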

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
1431 if (EnableVPlanNativePath) 1432 return CM_GatherScatter; 1433 1434 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1435 auto Itr = WideningDecisions.find(InstOnVF); 1436 if (Itr == WideningDecisions.end()) 1437 return CM_Unknown; 1438 return Itr->second.first; 1439 } 1440 1441 /// Return the vectorization cost for the given instruction \p I and vector 1442 /// width \p VF. 1443 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1444 assert(VF.isVector() && "Expected VF >=2"); 1445 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1446 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1447 "The cost is not calculated"); 1448 return WideningDecisions[InstOnVF].second; 1449 } 1450 1451 /// Return True if instruction \p I is an optimizable truncate whose operand 1452 /// is an induction variable. Such a truncate will be removed by adding a new 1453 /// induction variable with the destination type. 1454 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1455 // If the instruction is not a truncate, return false. 1456 auto *Trunc = dyn_cast<TruncInst>(I); 1457 if (!Trunc) 1458 return false; 1459 1460 // Get the source and destination types of the truncate. 1461 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1462 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1463 1464 // If the truncate is free for the given types, return false. Replacing a 1465 // free truncate with an induction variable would add an induction variable 1466 // update instruction to each iteration of the loop. We exclude from this 1467 // check the primary induction variable since it will need an update 1468 // instruction regardless. 1469 Value *Op = Trunc->getOperand(0); 1470 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1471 return false; 1472 1473 // If the truncated value is not an induction variable, return false. 1474 return Legal->isInductionPhi(Op); 1475 } 1476 1477 /// Collects the instructions to scalarize for each predicated instruction in 1478 /// the loop. 1479 void collectInstsToScalarize(ElementCount VF); 1480 1481 /// Collect Uniform and Scalar values for the given \p VF. 1482 /// The sets depend on CM decision for Load/Store instructions 1483 /// that may be vectorized as interleave, gather-scatter or scalarized. 1484 void collectUniformsAndScalars(ElementCount VF) { 1485 // Do the analysis once. 1486 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1487 return; 1488 setCostBasedWideningDecision(VF); 1489 collectLoopUniforms(VF); 1490 collectLoopScalars(VF); 1491 } 1492 1493 /// Returns true if the target machine supports masked store operation 1494 /// for the given \p DataType and kind of access to \p Ptr. 1495 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1496 return Legal->isConsecutivePtr(DataType, Ptr) && 1497 TTI.isLegalMaskedStore(DataType, Alignment); 1498 } 1499 1500 /// Returns true if the target machine supports masked load operation 1501 /// for the given \p DataType and kind of access to \p Ptr. 1502 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1503 return Legal->isConsecutivePtr(DataType, Ptr) && 1504 TTI.isLegalMaskedLoad(DataType, Alignment); 1505 } 1506 1507 /// Returns true if the target machine can represent \p V as a masked gather 1508 /// or scatter operation. 
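  /// Illustrative note (the body below is authoritative): a strided or
  /// indexed access such as
  ///   for (i = 0; i < n; ++i) sum += a[idx[i]];
  /// is not consecutive, but may still be widened if TTI reports a legal
  /// masked gather (for loads) or masked scatter (for stores) for the widened
  /// type at the access's alignment.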
1509 bool isLegalGatherOrScatter(Value *V, 1510 ElementCount VF = ElementCount::getFixed(1)) { 1511 bool LI = isa<LoadInst>(V); 1512 bool SI = isa<StoreInst>(V); 1513 if (!LI && !SI) 1514 return false; 1515 auto *Ty = getLoadStoreType(V); 1516 Align Align = getLoadStoreAlignment(V); 1517 if (VF.isVector()) 1518 Ty = VectorType::get(Ty, VF); 1519 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1520 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1521 } 1522 1523 /// Returns true if the target machine supports all of the reduction 1524 /// variables found for the given VF. 1525 bool canVectorizeReductions(ElementCount VF) const { 1526 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1527 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1528 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1529 })); 1530 } 1531 1532 /// Returns true if \p I is an instruction that will be scalarized with 1533 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1534 /// instructions include conditional stores and instructions that may divide 1535 /// by zero. 1536 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1537 1538 // Returns true if \p I is an instruction that will be predicated either 1539 // through scalar predication or masked load/store or masked gather/scatter. 1540 // \p VF is the vectorization factor that will be used to vectorize \p I. 1541 // Superset of instructions that return true for isScalarWithPredication. 1542 bool isPredicatedInst(Instruction *I, ElementCount VF, 1543 bool IsKnownUniform = false) { 1544 // When we know the load is uniform and the original scalar loop was not 1545 // predicated we don't need to mark it as a predicated instruction. Any 1546 // vectorised blocks created when tail-folding are something artificial we 1547 // have introduced and we know there is always at least one active lane. 1548 // That's why we call Legal->blockNeedsPredication here because it doesn't 1549 // query tail-folding. 1550 if (IsKnownUniform && isa<LoadInst>(I) && 1551 !Legal->blockNeedsPredication(I->getParent())) 1552 return false; 1553 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1554 return false; 1555 // Loads and stores that need some form of masked operation are predicated 1556 // instructions. 1557 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1558 return Legal->isMaskRequired(I); 1559 return isScalarWithPredication(I, VF); 1560 } 1561 1562 /// Returns true if \p I is a memory instruction with consecutive memory 1563 /// access that can be widened. 1564 bool 1565 memoryInstructionCanBeWidened(Instruction *I, 1566 ElementCount VF = ElementCount::getFixed(1)); 1567 1568 /// Returns true if \p I is a memory instruction in an interleaved-group 1569 /// of memory accesses that can be vectorized with wide vector loads/stores 1570 /// and shuffles. 1571 bool 1572 interleavedAccessCanBeWidened(Instruction *I, 1573 ElementCount VF = ElementCount::getFixed(1)); 1574 1575 /// Check if \p Instr belongs to any interleaved access group. 1576 bool isAccessInterleaved(Instruction *Instr) { 1577 return InterleaveInfo.isInterleaved(Instr); 1578 } 1579 1580 /// Get the interleaved access group that \p Instr belongs to. 1581 const InterleaveGroup<Instruction> * 1582 getInterleavedAccessGroup(Instruction *Instr) { 1583 return InterleaveInfo.getInterleaveGroup(Instr); 1584 } 1585 1586 /// Returns true if we're required to use a scalar epilogue for at least 1587 /// the final iteration of the original loop. 
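  /// Illustrative example: an interleave group with a gap, e.g. a loop that
  /// reads a[2*i] but never a[2*i+1], must not let the last vector iteration
  /// read past the end of 'a', so the final iteration(s) run in the scalar
  /// epilogue instead (see InterleaveInfo.requiresScalarEpilogue()).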
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. not forbidden due to
  /// optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either no vector version is available or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// Convenience function that returns the value of vscale_range iff
  /// vscale_range.min == vscale_range.max or otherwise returns the value
  /// returned by the corresponding TLI method.
  Optional<unsigned> getVScaleForTuning() const;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF,
                                       bool FoldTailByMasking);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
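  /// (Sketch of what this usually amounts to: the TTI cost of a masked
  /// gather/scatter of the widened type at the instruction's alignment; the
  /// out-of-line definition is authoritative.)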
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
1800 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1801 1802 /// PHINodes of the reductions that should be expanded in-loop along with 1803 /// their associated chains of reduction operations, in program order from top 1804 /// (PHI) to bottom 1805 ReductionChainMap InLoopReductionChains; 1806 1807 /// A Map of inloop reduction operations and their immediate chain operand. 1808 /// FIXME: This can be removed once reductions can be costed correctly in 1809 /// vplan. This was added to allow quick lookup to the inloop operations, 1810 /// without having to loop through InLoopReductionChains. 1811 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1812 1813 /// Returns the expected difference in cost from scalarizing the expression 1814 /// feeding a predicated instruction \p PredInst. The instructions to 1815 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1816 /// non-negative return value implies the expression will be scalarized. 1817 /// Currently, only single-use chains are considered for scalarization. 1818 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1819 ElementCount VF); 1820 1821 /// Collect the instructions that are uniform after vectorization. An 1822 /// instruction is uniform if we represent it with a single scalar value in 1823 /// the vectorized loop corresponding to each vector iteration. Examples of 1824 /// uniform instructions include pointer operands of consecutive or 1825 /// interleaved memory accesses. Note that although uniformity implies an 1826 /// instruction will be scalar, the reverse is not true. In general, a 1827 /// scalarized instruction will be represented by VF scalar values in the 1828 /// vectorized loop, each corresponding to an iteration of the original 1829 /// scalar loop. 1830 void collectLoopUniforms(ElementCount VF); 1831 1832 /// Collect the instructions that are scalar after vectorization. An 1833 /// instruction is scalar if it is known to be uniform or will be scalarized 1834 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1835 /// to the list if they are used by a load/store instruction that is marked as 1836 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1837 /// VF values in the vectorized loop, each corresponding to an iteration of 1838 /// the original scalar loop. 1839 void collectLoopScalars(ElementCount VF); 1840 1841 /// Keeps cost model vectorization decision and cost for instructions. 1842 /// Right now it is used for memory instructions only. 1843 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1844 std::pair<InstWidening, InstructionCost>>; 1845 1846 DecisionList WideningDecisions; 1847 1848 /// Returns true if \p V is expected to be vectorized and it needs to be 1849 /// extracted. 1850 bool needsExtract(Value *V, ElementCount VF) const { 1851 Instruction *I = dyn_cast<Instruction>(V); 1852 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1853 TheLoop->isLoopInvariant(I)) 1854 return false; 1855 1856 // Assume we can vectorize V (and hence we need extraction) if the 1857 // scalars are not computed yet. This can happen, because it is called 1858 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1859 // the scalars are collected. That should be a safe assumption in most 1860 // cases, because we check if the operands have vectorizable types 1861 // beforehand in LoopVectorizationLegality. 
1862 return Scalars.find(VF) == Scalars.end() || 1863 !isScalarAfterVectorization(I, VF); 1864 }; 1865 1866 /// Returns a range containing only operands needing to be extracted. 1867 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1868 ElementCount VF) const { 1869 return SmallVector<Value *, 4>(make_filter_range( 1870 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1871 } 1872 1873 /// Determines if we have the infrastructure to vectorize loop \p L and its 1874 /// epilogue, assuming the main loop is vectorized by \p VF. 1875 bool isCandidateForEpilogueVectorization(const Loop &L, 1876 const ElementCount VF) const; 1877 1878 /// Returns true if epilogue vectorization is considered profitable, and 1879 /// false otherwise. 1880 /// \p VF is the vectorization factor chosen for the original loop. 1881 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1882 1883 public: 1884 /// The loop that we evaluate. 1885 Loop *TheLoop; 1886 1887 /// Predicated scalar evolution analysis. 1888 PredicatedScalarEvolution &PSE; 1889 1890 /// Loop Info analysis. 1891 LoopInfo *LI; 1892 1893 /// Vectorization legality. 1894 LoopVectorizationLegality *Legal; 1895 1896 /// Vector target information. 1897 const TargetTransformInfo &TTI; 1898 1899 /// Target Library Info. 1900 const TargetLibraryInfo *TLI; 1901 1902 /// Demanded bits analysis. 1903 DemandedBits *DB; 1904 1905 /// Assumption cache. 1906 AssumptionCache *AC; 1907 1908 /// Interface to emit optimization remarks. 1909 OptimizationRemarkEmitter *ORE; 1910 1911 const Function *TheFunction; 1912 1913 /// Loop Vectorize Hint. 1914 const LoopVectorizeHints *Hints; 1915 1916 /// The interleave access information contains groups of interleaved accesses 1917 /// with the same stride and close to each other. 1918 InterleavedAccessInfo &InterleaveInfo; 1919 1920 /// Values to ignore in the cost model. 1921 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1922 1923 /// Values to ignore in the cost model when VF > 1. 1924 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1925 1926 /// All element types found in the loop. 1927 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1928 1929 /// Profitable vector factors. 1930 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1931 }; 1932 } // end namespace llvm 1933 1934 /// Helper struct to manage generating runtime checks for vectorization. 1935 /// 1936 /// The runtime checks are created up-front in temporary blocks to allow better 1937 /// estimating the cost and un-linked from the existing IR. After deciding to 1938 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1939 /// temporary blocks are completely removed. 1940 class GeneratedRTChecks { 1941 /// Basic block which contains the generated SCEV checks, if any. 1942 BasicBlock *SCEVCheckBlock = nullptr; 1943 1944 /// The value representing the result of the generated SCEV checks. If it is 1945 /// nullptr, either no SCEV checks have been generated or they have been used. 1946 Value *SCEVCheckCond = nullptr; 1947 1948 /// Basic block which contains the generated memory runtime checks, if any. 1949 BasicBlock *MemCheckBlock = nullptr; 1950 1951 /// The value representing the result of the generated memory runtime checks. 1952 /// If it is nullptr, either no memory runtime checks have been generated or 1953 /// they have been used. 
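  /// (In other words, both *CheckCond members double as "not yet consumed"
  /// markers: emitSCEVChecks()/emitMemRuntimeChecks() reset them to nullptr
  /// once the corresponding block has been wired into the CFG, which is what
  /// keeps the destructor from deleting blocks that are actually in use.)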
  Value *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and added back during vector code generation. If
  /// there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVPredicate &Pred) {

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
    if (!Pred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &Pred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      MemRuntimeCheckCond =
          addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                           RtPtrChecking.getChecks(), MemCheckExp);
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary blocks with the checks, update various places
    // accordingly.
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }

  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
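  /// (The checks are created eagerly in Create() purely so they can be
  /// costed; if the vectorizer then decides not to vectorize, nothing else
  /// owns these temporary blocks, so the destructor is what deletes them.)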
2039 ~GeneratedRTChecks() { 2040 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2041 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2042 if (!SCEVCheckCond) 2043 SCEVCleaner.markResultUsed(); 2044 2045 if (!MemRuntimeCheckCond) 2046 MemCheckCleaner.markResultUsed(); 2047 2048 if (MemRuntimeCheckCond) { 2049 auto &SE = *MemCheckExp.getSE(); 2050 // Memory runtime check generation creates compares that use expanded 2051 // values. Remove them before running the SCEVExpanderCleaners. 2052 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2053 if (MemCheckExp.isInsertedInstruction(&I)) 2054 continue; 2055 SE.forgetValue(&I); 2056 I.eraseFromParent(); 2057 } 2058 } 2059 MemCheckCleaner.cleanup(); 2060 SCEVCleaner.cleanup(); 2061 2062 if (SCEVCheckCond) 2063 SCEVCheckBlock->eraseFromParent(); 2064 if (MemRuntimeCheckCond) 2065 MemCheckBlock->eraseFromParent(); 2066 } 2067 2068 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2069 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2070 /// depending on the generated condition. 2071 BasicBlock *emitSCEVChecks(BasicBlock *Bypass, 2072 BasicBlock *LoopVectorPreHeader, 2073 BasicBlock *LoopExitBlock) { 2074 if (!SCEVCheckCond) 2075 return nullptr; 2076 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2077 if (C->isZero()) 2078 return nullptr; 2079 2080 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2081 2082 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2083 // Create new preheader for vector loop. 2084 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2085 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2086 2087 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2088 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2089 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2090 SCEVCheckBlock); 2091 2092 DT->addNewBlock(SCEVCheckBlock, Pred); 2093 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2094 2095 ReplaceInstWithInst( 2096 SCEVCheckBlock->getTerminator(), 2097 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2098 // Mark the check as used, to prevent it from being removed during cleanup. 2099 SCEVCheckCond = nullptr; 2100 return SCEVCheckBlock; 2101 } 2102 2103 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2104 /// the branches to branch to the vector preheader or \p Bypass, depending on 2105 /// the generated condition. 2106 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass, 2107 BasicBlock *LoopVectorPreHeader) { 2108 // Check if we generated code that checks in runtime if arrays overlap. 2109 if (!MemRuntimeCheckCond) 2110 return nullptr; 2111 2112 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2113 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2114 MemCheckBlock); 2115 2116 DT->addNewBlock(MemCheckBlock, Pred); 2117 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2118 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2119 2120 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2121 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2122 2123 ReplaceInstWithInst( 2124 MemCheckBlock->getTerminator(), 2125 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2126 MemCheckBlock->getTerminator()->setDebugLoc( 2127 Pred->getTerminator()->getDebugLoc()); 2128 2129 // Mark the check as used, to prevent it from being removed during cleanup. 
2130 MemRuntimeCheckCond = nullptr; 2131 return MemCheckBlock; 2132 } 2133 }; 2134 2135 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2136 // vectorization. The loop needs to be annotated with #pragma omp simd 2137 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2138 // vector length information is not provided, vectorization is not considered 2139 // explicit. Interleave hints are not allowed either. These limitations will be 2140 // relaxed in the future. 2141 // Please, note that we are currently forced to abuse the pragma 'clang 2142 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2143 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2144 // provides *explicit vectorization hints* (LV can bypass legal checks and 2145 // assume that vectorization is legal). However, both hints are implemented 2146 // using the same metadata (llvm.loop.vectorize, processed by 2147 // LoopVectorizeHints). This will be fixed in the future when the native IR 2148 // representation for pragma 'omp simd' is introduced. 2149 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2150 OptimizationRemarkEmitter *ORE) { 2151 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2152 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2153 2154 // Only outer loops with an explicit vectorization hint are supported. 2155 // Unannotated outer loops are ignored. 2156 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2157 return false; 2158 2159 Function *Fn = OuterLp->getHeader()->getParent(); 2160 if (!Hints.allowVectorization(Fn, OuterLp, 2161 true /*VectorizeOnlyWhenForced*/)) { 2162 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2163 return false; 2164 } 2165 2166 if (Hints.getInterleave() > 1) { 2167 // TODO: Interleave support is future work. 2168 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2169 "outer loops.\n"); 2170 Hints.emitRemarkWithHints(); 2171 return false; 2172 } 2173 2174 return true; 2175 } 2176 2177 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2178 OptimizationRemarkEmitter *ORE, 2179 SmallVectorImpl<Loop *> &V) { 2180 // Collect inner loops and outer loops without irreducible control flow. For 2181 // now, only collect outer loops that have explicit vectorization hints. If we 2182 // are stress testing the VPlan H-CFG construction, we collect the outermost 2183 // loop of every loop nest. 2184 if (L.isInnermost() || VPlanBuildStressTest || 2185 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2186 LoopBlocksRPO RPOT(&L); 2187 RPOT.perform(LI); 2188 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2189 V.push_back(&L); 2190 // TODO: Collect inner loops inside marked outer loops in case 2191 // vectorization fails for the outer loop. Do not invoke 2192 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2193 // already known to be reducible. We can use an inherited attribute for 2194 // that. 2195 return; 2196 } 2197 } 2198 for (Loop *InnerL : L) 2199 collectSupportedLoops(*InnerL, LI, ORE, V); 2200 } 2201 2202 namespace { 2203 2204 /// The LoopVectorize Pass. 
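/// (This is the legacy pass-manager wrapper: it only collects the required
/// analyses and forwards to LoopVectorizePass::runImpl; with the new pass
/// manager, LoopVectorizePass is used directly.)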
2205 struct LoopVectorize : public FunctionPass { 2206 /// Pass identification, replacement for typeid 2207 static char ID; 2208 2209 LoopVectorizePass Impl; 2210 2211 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2212 bool VectorizeOnlyWhenForced = false) 2213 : FunctionPass(ID), 2214 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2215 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2216 } 2217 2218 bool runOnFunction(Function &F) override { 2219 if (skipFunction(F)) 2220 return false; 2221 2222 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2223 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2224 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2225 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2226 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2227 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2228 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2229 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2230 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2231 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2232 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2233 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2234 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2235 2236 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2237 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2238 2239 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2240 GetLAA, *ORE, PSI).MadeAnyChange; 2241 } 2242 2243 void getAnalysisUsage(AnalysisUsage &AU) const override { 2244 AU.addRequired<AssumptionCacheTracker>(); 2245 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2246 AU.addRequired<DominatorTreeWrapperPass>(); 2247 AU.addRequired<LoopInfoWrapperPass>(); 2248 AU.addRequired<ScalarEvolutionWrapperPass>(); 2249 AU.addRequired<TargetTransformInfoWrapperPass>(); 2250 AU.addRequired<AAResultsWrapperPass>(); 2251 AU.addRequired<LoopAccessLegacyAnalysis>(); 2252 AU.addRequired<DemandedBitsWrapperPass>(); 2253 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2254 AU.addRequired<InjectTLIMappingsLegacy>(); 2255 2256 // We currently do not preserve loopinfo/dominator analyses with outer loop 2257 // vectorization. Until this is addressed, mark these analyses as preserved 2258 // only for non-VPlan-native path. 2259 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2260 if (!EnableVPlanNativePath) { 2261 AU.addPreserved<LoopInfoWrapperPass>(); 2262 AU.addPreserved<DominatorTreeWrapperPass>(); 2263 } 2264 2265 AU.addPreserved<BasicAAWrapperPass>(); 2266 AU.addPreserved<GlobalsAAWrapperPass>(); 2267 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2268 } 2269 }; 2270 2271 } // end anonymous namespace 2272 2273 //===----------------------------------------------------------------------===// 2274 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2275 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2276 //===----------------------------------------------------------------------===// 2277 2278 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2279 // We need to place the broadcast of invariant variables outside the loop, 2280 // but only if it's proven safe to do so. Else, broadcast will be inside 2281 // vector loop body. 
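  // For a fixed VF of e.g. 4, the CreateVectorSplat call below typically
  // expands to IR along these lines (illustrative only):
  //   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i64 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                                    <4 x i32> poison, <4 x i32> zeroinitializer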
2282 Instruction *Instr = dyn_cast<Instruction>(V); 2283 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2284 (!Instr || 2285 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2286 // Place the code for broadcasting invariant variables in the new preheader. 2287 IRBuilder<>::InsertPointGuard Guard(Builder); 2288 if (SafeToHoist) 2289 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2290 2291 // Broadcast the scalar into all locations in the vector. 2292 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2293 2294 return Shuf; 2295 } 2296 2297 /// This function adds 2298 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2299 /// to each vector element of Val. The sequence starts at StartIndex. 2300 /// \p Opcode is relevant for FP induction variable. 2301 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2302 Instruction::BinaryOps BinOp, ElementCount VF, 2303 IRBuilderBase &Builder) { 2304 assert(VF.isVector() && "only vector VFs are supported"); 2305 2306 // Create and check the types. 2307 auto *ValVTy = cast<VectorType>(Val->getType()); 2308 ElementCount VLen = ValVTy->getElementCount(); 2309 2310 Type *STy = Val->getType()->getScalarType(); 2311 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2312 "Induction Step must be an integer or FP"); 2313 assert(Step->getType() == STy && "Step has wrong type"); 2314 2315 SmallVector<Constant *, 8> Indices; 2316 2317 // Create a vector of consecutive numbers from zero to VF. 2318 VectorType *InitVecValVTy = ValVTy; 2319 if (STy->isFloatingPointTy()) { 2320 Type *InitVecValSTy = 2321 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2322 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2323 } 2324 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2325 2326 // Splat the StartIdx 2327 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2328 2329 if (STy->isIntegerTy()) { 2330 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2331 Step = Builder.CreateVectorSplat(VLen, Step); 2332 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2333 // FIXME: The newly created binary instructions should contain nsw/nuw 2334 // flags, which can be found from the original scalar operations. 2335 Step = Builder.CreateMul(InitVec, Step); 2336 return Builder.CreateAdd(Val, Step, "induction"); 2337 } 2338 2339 // Floating point induction. 2340 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2341 "Binary Opcode should be specified for FP induction"); 2342 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2343 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2344 2345 Step = Builder.CreateVectorSplat(VLen, Step); 2346 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2347 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2348 } 2349 2350 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2351 /// variable on which to base the steps, \p Step is the size of the step. 2352 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2353 const InductionDescriptor &ID, VPValue *Def, 2354 VPTransformState &State) { 2355 IRBuilderBase &Builder = State.Builder; 2356 // We shouldn't have to build scalar steps if we aren't vectorizing. 2357 assert(State.VF.isVector() && "VF should be greater than one"); 2358 // Get the value type and ensure it and the step have the same integer type. 
2359 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2360 assert(ScalarIVTy == Step->getType() && 2361 "Val and Step should have the same type"); 2362 2363 // We build scalar steps for both integer and floating-point induction 2364 // variables. Here, we determine the kind of arithmetic we will perform. 2365 Instruction::BinaryOps AddOp; 2366 Instruction::BinaryOps MulOp; 2367 if (ScalarIVTy->isIntegerTy()) { 2368 AddOp = Instruction::Add; 2369 MulOp = Instruction::Mul; 2370 } else { 2371 AddOp = ID.getInductionOpcode(); 2372 MulOp = Instruction::FMul; 2373 } 2374 2375 // Determine the number of scalars we need to generate for each unroll 2376 // iteration. 2377 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2378 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2379 // Compute the scalar steps and save the results in State. 2380 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2381 ScalarIVTy->getScalarSizeInBits()); 2382 Type *VecIVTy = nullptr; 2383 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2384 if (!FirstLaneOnly && State.VF.isScalable()) { 2385 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2386 UnitStepVec = 2387 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2388 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2389 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2390 } 2391 2392 for (unsigned Part = 0; Part < State.UF; ++Part) { 2393 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2394 2395 if (!FirstLaneOnly && State.VF.isScalable()) { 2396 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2397 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2398 if (ScalarIVTy->isFloatingPointTy()) 2399 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2400 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2401 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2402 State.set(Def, Add, Part); 2403 // It's useful to record the lane values too for the known minimum number 2404 // of elements so we do those below. This improves the code quality when 2405 // trying to extract the first element, for example. 2406 } 2407 2408 if (ScalarIVTy->isFloatingPointTy()) 2409 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2410 2411 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2412 Value *StartIdx = Builder.CreateBinOp( 2413 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2414 // The step returned by `createStepForVF` is a runtime-evaluated value 2415 // when VF is scalable. Otherwise, it should be folded into a Constant. 2416 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2417 "Expected StartIdx to be folded to a constant when VF is not " 2418 "scalable"); 2419 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2420 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2421 State.set(Def, Add, VPIteration(Part, Lane)); 2422 } 2423 } 2424 } 2425 2426 // Generate code for the induction step. 
Note that induction steps are
// required to be loop-invariant.
static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
                              Instruction *InsertBefore,
                              Loop *OrigLoop = nullptr) {
  const DataLayout &DL = SE.getDataLayout();
  assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
         "Induction step should be loop invariant");
  if (auto *E = dyn_cast<SCEVUnknown>(Step))
    return E->getValue();

  SCEVExpander Exp(SE, DL, "induction");
  return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
}

/// Compute the transformed value of Index at offset StartValue using step
/// StepValue.
/// For integer induction, returns StartValue + Index * StepValue.
/// For pointer induction, returns StartValue[Index * StepValue].
/// FIXME: The newly created binary instructions should contain nsw/nuw
/// flags, which can be found from the original scalar operations.
static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
                                   Value *StartValue, Value *Step,
                                   const InductionDescriptor &ID) {
  assert(Index->getType()->getScalarType() == Step->getType() &&
         "Index scalar type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  // We allow X to be a vector type, in which case Y will potentially be
  // splatted into a vector with the same element count.
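  // E.g. (illustrative) multiplying a <4 x i64> index vector by a scalar i64
  // step first splats the step to <4 x i64> and then emits a single vector mul.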
2472 auto CreateMul = [&B](Value *X, Value *Y) { 2473 assert(X->getType()->getScalarType() == Y->getType() && 2474 "Types don't match!"); 2475 if (auto *CX = dyn_cast<ConstantInt>(X)) 2476 if (CX->isOne()) 2477 return Y; 2478 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2479 if (CY->isOne()) 2480 return X; 2481 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2482 if (XVTy && !isa<VectorType>(Y->getType())) 2483 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2484 return B.CreateMul(X, Y); 2485 }; 2486 2487 switch (ID.getKind()) { 2488 case InductionDescriptor::IK_IntInduction: { 2489 assert(!isa<VectorType>(Index->getType()) && 2490 "Vector indices not supported for integer inductions yet"); 2491 assert(Index->getType() == StartValue->getType() && 2492 "Index type does not match StartValue type"); 2493 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2494 return B.CreateSub(StartValue, Index); 2495 auto *Offset = CreateMul(Index, Step); 2496 return CreateAdd(StartValue, Offset); 2497 } 2498 case InductionDescriptor::IK_PtrInduction: { 2499 assert(isa<Constant>(Step) && 2500 "Expected constant step for pointer induction"); 2501 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2502 } 2503 case InductionDescriptor::IK_FpInduction: { 2504 assert(!isa<VectorType>(Index->getType()) && 2505 "Vector indices not supported for FP inductions yet"); 2506 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2507 auto InductionBinOp = ID.getInductionBinOp(); 2508 assert(InductionBinOp && 2509 (InductionBinOp->getOpcode() == Instruction::FAdd || 2510 InductionBinOp->getOpcode() == Instruction::FSub) && 2511 "Original bin op should be defined for FP induction"); 2512 2513 Value *MulExp = B.CreateFMul(Step, Index); 2514 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2515 "induction"); 2516 } 2517 case InductionDescriptor::IK_NoInduction: 2518 return nullptr; 2519 } 2520 llvm_unreachable("invalid enum"); 2521 } 2522 2523 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2524 const VPIteration &Instance, 2525 VPTransformState &State) { 2526 Value *ScalarInst = State.get(Def, Instance); 2527 Value *VectorValue = State.get(Def, Instance.Part); 2528 VectorValue = Builder.CreateInsertElement( 2529 VectorValue, ScalarInst, 2530 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2531 State.set(Def, VectorValue, Instance.Part); 2532 } 2533 2534 // Return whether we allow using masked interleave-groups (for dealing with 2535 // strided loads/stores that reside in predicated blocks, or for dealing 2536 // with gaps). 2537 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2538 // If an override option has been passed in for interleaved accesses, use it. 2539 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2540 return EnableMaskedInterleavedMemAccesses; 2541 2542 return TTI.enableMaskedInterleavedAccessVectorization(); 2543 } 2544 2545 // Try to vectorize the interleave group that \p Instr belongs to. 2546 // 2547 // E.g. Translate following interleaved load group (factor = 3): 2548 // for (i = 0; i < N; i+=3) { 2549 // R = Pic[i]; // Member of index 0 2550 // G = Pic[i+1]; // Member of index 1 2551 // B = Pic[i+2]; // Member of index 2 2552 // ... 
// do something to R, G, B 2553 // } 2554 // To: 2555 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2556 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2557 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2558 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2559 // 2560 // Or translate following interleaved store group (factor = 3): 2561 // for (i = 0; i < N; i+=3) { 2562 // ... do something to R, G, B 2563 // Pic[i] = R; // Member of index 0 2564 // Pic[i+1] = G; // Member of index 1 2565 // Pic[i+2] = B; // Member of index 2 2566 // } 2567 // To: 2568 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2569 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2570 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2571 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2572 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2573 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2574 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2575 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2576 VPValue *BlockInMask) { 2577 Instruction *Instr = Group->getInsertPos(); 2578 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2579 2580 // Prepare for the vector type of the interleaved load/store. 2581 Type *ScalarTy = getLoadStoreType(Instr); 2582 unsigned InterleaveFactor = Group->getFactor(); 2583 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2584 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2585 2586 // Prepare for the new pointers. 2587 SmallVector<Value *, 2> AddrParts; 2588 unsigned Index = Group->getIndex(Instr); 2589 2590 // TODO: extend the masked interleaved-group support to reversed access. 2591 assert((!BlockInMask || !Group->isReverse()) && 2592 "Reversed masked interleave-group not supported."); 2593 2594 // If the group is reverse, adjust the index to refer to the last vector lane 2595 // instead of the first. We adjust the index from the first vector lane, 2596 // rather than directly getting the pointer for lane VF - 1, because the 2597 // pointer operand of the interleaved access is supposed to be uniform. For 2598 // uniform instructions, we're only required to generate a value for the 2599 // first vector lane in each unroll iteration. 2600 if (Group->isReverse()) 2601 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2602 2603 for (unsigned Part = 0; Part < UF; Part++) { 2604 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2605 setDebugLocFromInst(AddrPart); 2606 2607 // Notice current instruction could be any index. Need to adjust the address 2608 // to the member of index 0. 2609 // 2610 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2611 // b = A[i]; // Member of index 0 2612 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2613 // 2614 // E.g. A[i+1] = a; // Member of index 1 2615 // A[i] = b; // Member of index 0 2616 // A[i+2] = c; // Member of index 2 (Current instruction) 2617 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2618 2619 bool InBounds = false; 2620 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2621 InBounds = gep->isInBounds(); 2622 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2623 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2624 2625 // Cast to the vector pointer type. 
2626 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2627 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2628 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2629 } 2630 2631 setDebugLocFromInst(Instr); 2632 Value *PoisonVec = PoisonValue::get(VecTy); 2633 2634 Value *MaskForGaps = nullptr; 2635 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2636 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2637 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2638 } 2639 2640 // Vectorize the interleaved load group. 2641 if (isa<LoadInst>(Instr)) { 2642 // For each unroll part, create a wide load for the group. 2643 SmallVector<Value *, 2> NewLoads; 2644 for (unsigned Part = 0; Part < UF; Part++) { 2645 Instruction *NewLoad; 2646 if (BlockInMask || MaskForGaps) { 2647 assert(useMaskedInterleavedAccesses(*TTI) && 2648 "masked interleaved groups are not allowed."); 2649 Value *GroupMask = MaskForGaps; 2650 if (BlockInMask) { 2651 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2652 Value *ShuffledMask = Builder.CreateShuffleVector( 2653 BlockInMaskPart, 2654 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2655 "interleaved.mask"); 2656 GroupMask = MaskForGaps 2657 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2658 MaskForGaps) 2659 : ShuffledMask; 2660 } 2661 NewLoad = 2662 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2663 GroupMask, PoisonVec, "wide.masked.vec"); 2664 } 2665 else 2666 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2667 Group->getAlign(), "wide.vec"); 2668 Group->addMetadata(NewLoad); 2669 NewLoads.push_back(NewLoad); 2670 } 2671 2672 // For each member in the group, shuffle out the appropriate data from the 2673 // wide loads. 2674 unsigned J = 0; 2675 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2676 Instruction *Member = Group->getMember(I); 2677 2678 // Skip the gaps in the group. 2679 if (!Member) 2680 continue; 2681 2682 auto StrideMask = 2683 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2684 for (unsigned Part = 0; Part < UF; Part++) { 2685 Value *StridedVec = Builder.CreateShuffleVector( 2686 NewLoads[Part], StrideMask, "strided.vec"); 2687 2688 // If this member has different type, cast the result type. 2689 if (Member->getType() != ScalarTy) { 2690 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2691 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2692 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2693 } 2694 2695 if (Group->isReverse()) 2696 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2697 2698 State.set(VPDefs[J], StridedVec, Part); 2699 } 2700 ++J; 2701 } 2702 return; 2703 } 2704 2705 // The sub vector type for current instruction. 2706 auto *SubVT = VectorType::get(ScalarTy, VF); 2707 2708 // Vectorize the interleaved store group. 2709 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2710 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2711 "masked interleaved groups are not allowed."); 2712 assert((!MaskForGaps || !VF.isScalable()) && 2713 "masking gaps for scalable vectors is not yet supported."); 2714 for (unsigned Part = 0; Part < UF; Part++) { 2715 // Collect the stored vector from each member. 
2716 SmallVector<Value *, 4> StoredVecs; 2717 for (unsigned i = 0; i < InterleaveFactor; i++) { 2718 assert((Group->getMember(i) || MaskForGaps) && 2719 "Fail to get a member from an interleaved store group"); 2720 Instruction *Member = Group->getMember(i); 2721 2722 // Skip the gaps in the group. 2723 if (!Member) { 2724 Value *Undef = PoisonValue::get(SubVT); 2725 StoredVecs.push_back(Undef); 2726 continue; 2727 } 2728 2729 Value *StoredVec = State.get(StoredValues[i], Part); 2730 2731 if (Group->isReverse()) 2732 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2733 2734 // If this member has different type, cast it to a unified type. 2735 2736 if (StoredVec->getType() != SubVT) 2737 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2738 2739 StoredVecs.push_back(StoredVec); 2740 } 2741 2742 // Concatenate all vectors into a wide vector. 2743 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2744 2745 // Interleave the elements in the wide vector. 2746 Value *IVec = Builder.CreateShuffleVector( 2747 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2748 "interleaved.vec"); 2749 2750 Instruction *NewStoreInstr; 2751 if (BlockInMask || MaskForGaps) { 2752 Value *GroupMask = MaskForGaps; 2753 if (BlockInMask) { 2754 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2755 Value *ShuffledMask = Builder.CreateShuffleVector( 2756 BlockInMaskPart, 2757 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2758 "interleaved.mask"); 2759 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2760 ShuffledMask, MaskForGaps) 2761 : ShuffledMask; 2762 } 2763 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2764 Group->getAlign(), GroupMask); 2765 } else 2766 NewStoreInstr = 2767 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2768 2769 Group->addMetadata(NewStoreInstr); 2770 } 2771 } 2772 2773 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2774 VPReplicateRecipe *RepRecipe, 2775 const VPIteration &Instance, 2776 bool IfPredicateInstr, 2777 VPTransformState &State) { 2778 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2779 2780 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2781 // the first lane and part. 2782 if (isa<NoAliasScopeDeclInst>(Instr)) 2783 if (!Instance.isFirstIteration()) 2784 return; 2785 2786 // Does this instruction return a value ? 2787 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2788 2789 Instruction *Cloned = Instr->clone(); 2790 if (!IsVoidRetTy) 2791 Cloned->setName(Instr->getName() + ".cloned"); 2792 2793 // If the scalarized instruction contributes to the address computation of a 2794 // widen masked load/store which was in a basic block that needed predication 2795 // and is not predicated after vectorization, we can't propagate 2796 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2797 // instruction could feed a poison value to the base address of the widen 2798 // load/store. 2799 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2800 Cloned->dropPoisonGeneratingFlags(); 2801 2802 if (Instr->getDebugLoc()) 2803 setDebugLocFromInst(Instr); 2804 2805 // Replace the operands of the cloned instructions with their scalar 2806 // equivalents in the new loop. 
2807 for (auto &I : enumerate(RepRecipe->operands())) { 2808 auto InputInstance = Instance; 2809 VPValue *Operand = I.value(); 2810 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2811 if (OperandR && OperandR->isUniform()) 2812 InputInstance.Lane = VPLane::getFirstLane(); 2813 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2814 } 2815 addNewMetadata(Cloned, Instr); 2816 2817 // Place the cloned scalar in the new loop. 2818 State.Builder.Insert(Cloned); 2819 2820 State.set(RepRecipe, Cloned, Instance); 2821 2822 // If we just cloned a new assumption, add it the assumption cache. 2823 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2824 AC->registerAssumption(II); 2825 2826 // End if-block. 2827 if (IfPredicateInstr) 2828 PredicatedInstructions.push_back(Cloned); 2829 } 2830 2831 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2832 if (TripCount) 2833 return TripCount; 2834 2835 assert(InsertBlock); 2836 IRBuilder<> Builder(InsertBlock->getTerminator()); 2837 // Find the loop boundaries. 2838 ScalarEvolution *SE = PSE.getSE(); 2839 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2840 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2841 "Invalid loop count"); 2842 2843 Type *IdxTy = Legal->getWidestInductionType(); 2844 assert(IdxTy && "No type for induction"); 2845 2846 // The exit count might have the type of i64 while the phi is i32. This can 2847 // happen if we have an induction variable that is sign extended before the 2848 // compare. The only way that we get a backedge taken count is that the 2849 // induction variable was signed and as such will not overflow. In such a case 2850 // truncation is legal. 2851 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2852 IdxTy->getPrimitiveSizeInBits()) 2853 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2854 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2855 2856 // Get the total trip count from the count by adding 1. 2857 const SCEV *ExitCount = SE->getAddExpr( 2858 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2859 2860 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2861 2862 // Expand the trip count and place the new instructions in the preheader. 2863 // Notice that the pre-header does not change, only the loop body. 2864 SCEVExpander Exp(*SE, DL, "induction"); 2865 2866 // Count holds the overall loop count (N). 2867 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2868 InsertBlock->getTerminator()); 2869 2870 if (TripCount->getType()->isPointerTy()) 2871 TripCount = 2872 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2873 InsertBlock->getTerminator()); 2874 2875 return TripCount; 2876 } 2877 2878 Value * 2879 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2880 if (VectorTripCount) 2881 return VectorTripCount; 2882 2883 Value *TC = getOrCreateTripCount(InsertBlock); 2884 IRBuilder<> Builder(InsertBlock->getTerminator()); 2885 2886 Type *Ty = TC->getType(); 2887 // This is where we can make the step a runtime constant. 2888 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2889 2890 // If the tail is to be folded by masking, round the number of iterations N 2891 // up to a multiple of Step instead of rounding down. This is done by first 2892 // adding Step-1 and then rounding down. 
Note that it's ok if this addition 2893 // overflows: the vector induction variable will eventually wrap to zero given 2894 // that it starts at zero and its Step is a power of two; the loop will then 2895 // exit, with the last early-exit vector comparison also producing all-true. 2896 if (Cost->foldTailByMasking()) { 2897 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2898 "VF*UF must be a power of 2 when folding tail by masking"); 2899 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2900 TC = Builder.CreateAdd( 2901 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2902 } 2903 2904 // Now we need to generate the expression for the part of the loop that the 2905 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2906 // iterations are not required for correctness, or N - Step, otherwise. Step 2907 // is equal to the vectorization factor (number of SIMD elements) times the 2908 // unroll factor (number of SIMD instructions). 2909 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2910 2911 // There are cases where we *must* run at least one iteration in the remainder 2912 // loop. See the cost model for when this can happen. If the step evenly 2913 // divides the trip count, we set the remainder to be equal to the step. If 2914 // the step does not evenly divide the trip count, no adjustment is necessary 2915 // since there will already be scalar iterations. Note that the minimum 2916 // iterations check ensures that N >= Step. 2917 if (Cost->requiresScalarEpilogue(VF)) { 2918 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2919 R = Builder.CreateSelect(IsZero, Step, R); 2920 } 2921 2922 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2923 2924 return VectorTripCount; 2925 } 2926 2927 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2928 const DataLayout &DL) { 2929 // Verify that V is a vector type with same number of elements as DstVTy. 2930 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2931 unsigned VF = DstFVTy->getNumElements(); 2932 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2933 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2934 Type *SrcElemTy = SrcVecTy->getElementType(); 2935 Type *DstElemTy = DstFVTy->getElementType(); 2936 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2937 "Vector elements must have same size"); 2938 2939 // Do a direct cast if element types are castable. 2940 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2941 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2942 } 2943 // V cannot be directly casted to desired vector type. 2944 // May happen when V is a floating point vector but DstVTy is a vector of 2945 // pointers or vice-versa. Handle this using a two-step bitcast using an 2946 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
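  // Illustrative sketch (assumed types and names, not from the original
  // source): with 64-bit pointers, casting %v : <4 x double> to <4 x i64*>
  // cannot be done with a single bitcast, so this is emitted as
  //   %v.int = bitcast <4 x double> %v to <4 x i64>
  //   %v.ptr = inttoptr <4 x i64> %v.int to <4 x i64*>
  // with both casts produced by CreateBitOrPointerCast below.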
2947 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2948 "Only one type should be a pointer type"); 2949 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2950 "Only one type should be a floating point type"); 2951 Type *IntTy = 2952 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2953 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2954 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2955 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2956 } 2957 2958 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) { 2959 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2960 // Reuse existing vector loop preheader for TC checks. 2961 // Note that new preheader block is generated for vector loop. 2962 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2963 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2964 2965 // Generate code to check if the loop's trip count is less than VF * UF, or 2966 // equal to it in case a scalar epilogue is required; this implies that the 2967 // vector trip count is zero. This check also covers the case where adding one 2968 // to the backedge-taken count overflowed leading to an incorrect trip count 2969 // of zero. In this case we will also jump to the scalar loop. 2970 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2971 : ICmpInst::ICMP_ULT; 2972 2973 // If tail is to be folded, vector loop takes care of all iterations. 2974 Value *CheckMinIters = Builder.getFalse(); 2975 if (!Cost->foldTailByMasking()) { 2976 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 2977 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 2978 } 2979 // Create new preheader for vector loop. 2980 LoopVectorPreHeader = 2981 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2982 "vector.ph"); 2983 2984 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2985 DT->getNode(Bypass)->getIDom()) && 2986 "TC check is expected to dominate Bypass"); 2987 2988 // Update dominator for Bypass & LoopExit (if needed). 2989 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2990 if (!Cost->requiresScalarEpilogue(VF)) 2991 // If there is an epilogue which must run, there's no edge from the 2992 // middle block to exit blocks and thus no need to update the immediate 2993 // dominator of the exit blocks. 2994 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2995 2996 ReplaceInstWithInst( 2997 TCCheckBlock->getTerminator(), 2998 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2999 LoopBypassBlocks.push_back(TCCheckBlock); 3000 } 3001 3002 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3003 3004 BasicBlock *const SCEVCheckBlock = 3005 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3006 if (!SCEVCheckBlock) 3007 return nullptr; 3008 3009 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3010 (OptForSizeBasedOnProfile && 3011 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3012 "Cannot SCEV check stride or overflow when optimizing for size"); 3013 3014 3015 // Update dominator only if this is first RT check. 
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    if (!Cost->requiresScalarEpilogue(VF))
      // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
      // dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
  return SCEVCheckBlock;
}

BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
  // VPlan-native path does not do any analysis for runtime checks currently.
  if (EnableVPlanNativePath)
    return nullptr;

  BasicBlock *const MemCheckBlock =
      RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);

  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        OrigLoop->getStartLoc(),
                                        OrigLoop->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the remainder.
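  // Illustrative sketch (not part of the original comment; block names
  // assumed): in case 2 the middle block initially ends with
  //   br i1 true, label %exit.block, label %scalar.ph
  // and completeLoopSkeleton may later replace the constant condition with
  // the %cmp.n trip-count comparison built there.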
3098 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 3099 BranchInst::Create(LoopScalarPreHeader) : 3100 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3101 Builder.getTrue()); 3102 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3103 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3104 3105 // Update dominator for loop exit. During skeleton creation, only the vector 3106 // pre-header and the middle block are created. The vector loop is entirely 3107 // created during VPlan exection. 3108 if (!Cost->requiresScalarEpilogue(VF)) 3109 // If there is an epilogue which must run, there's no edge from the 3110 // middle block to exit blocks and thus no need to update the immediate 3111 // dominator of the exit blocks. 3112 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3113 } 3114 3115 void InnerLoopVectorizer::createInductionResumeValues( 3116 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3117 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3118 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3119 "Inconsistent information about additional bypass."); 3120 3121 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3122 assert(VectorTripCount && "Expected valid arguments"); 3123 // We are going to resume the execution of the scalar loop. 3124 // Go over all of the induction variables that we found and fix the 3125 // PHIs that are left in the scalar version of the loop. 3126 // The starting values of PHI nodes depend on the counter of the last 3127 // iteration in the vectorized loop. 3128 // If we come from a bypass edge then we need to start from the original 3129 // start value. 3130 Instruction *OldInduction = Legal->getPrimaryInduction(); 3131 for (auto &InductionEntry : Legal->getInductionVars()) { 3132 PHINode *OrigPhi = InductionEntry.first; 3133 InductionDescriptor II = InductionEntry.second; 3134 3135 // Create phi nodes to merge from the backedge-taken check block. 3136 PHINode *BCResumeVal = 3137 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3138 LoopScalarPreHeader->getTerminator()); 3139 // Copy original phi DL over to the new one. 3140 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3141 Value *&EndValue = IVEndValues[OrigPhi]; 3142 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3143 if (OrigPhi == OldInduction) { 3144 // We know what the end value is. 3145 EndValue = VectorTripCount; 3146 } else { 3147 IRBuilder<> B(LoopVectorPreHeader->getTerminator()); 3148 3149 // Fast-math-flags propagate from the original induction instruction. 3150 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3151 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3152 3153 Type *StepType = II.getStep()->getType(); 3154 Instruction::CastOps CastOp = 3155 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3156 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3157 Value *Step = 3158 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3159 EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3160 EndValue->setName("ind.end"); 3161 3162 // Compute the end value for the additional bypass (if applicable). 
3163 if (AdditionalBypass.first) { 3164 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3165 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3166 StepType, true); 3167 Value *Step = 3168 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3169 CRD = 3170 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3171 EndValueFromAdditionalBypass = 3172 emitTransformedIndex(B, CRD, II.getStartValue(), Step, II); 3173 EndValueFromAdditionalBypass->setName("ind.end"); 3174 } 3175 } 3176 // The new PHI merges the original incoming value, in case of a bypass, 3177 // or the value at the end of the vectorized loop. 3178 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3179 3180 // Fix the scalar body counter (PHI node). 3181 // The old induction's phi node in the scalar body needs the truncated 3182 // value. 3183 for (BasicBlock *BB : LoopBypassBlocks) 3184 BCResumeVal->addIncoming(II.getStartValue(), BB); 3185 3186 if (AdditionalBypass.first) 3187 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3188 EndValueFromAdditionalBypass); 3189 3190 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3191 } 3192 } 3193 3194 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3195 // The trip counts should be cached by now. 3196 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3197 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3198 3199 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3200 3201 // Add a check in the middle block to see if we have completed 3202 // all of the iterations in the first vector loop. Three cases: 3203 // 1) If we require a scalar epilogue, there is no conditional branch as 3204 // we unconditionally branch to the scalar preheader. Do nothing. 3205 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3206 // Thus if tail is to be folded, we know we don't need to run the 3207 // remainder and we can use the previous value for the condition (true). 3208 // 3) Otherwise, construct a runtime check. 3209 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3210 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3211 Count, VectorTripCount, "cmp.n", 3212 LoopMiddleBlock->getTerminator()); 3213 3214 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3215 // of the corresponding compare because they may have ended up with 3216 // different line numbers and we want to avoid awkward line stepping while 3217 // debugging. Eg. if the compare has got a line number inside the loop. 3218 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3219 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3220 } 3221 3222 #ifdef EXPENSIVE_CHECKS 3223 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3224 #endif 3225 3226 return LoopVectorPreHeader; 3227 } 3228 3229 std::pair<BasicBlock *, Value *> 3230 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3231 /* 3232 In this function we generate a new loop. The new loop will contain 3233 the vectorized instructions while the old loop will continue to run the 3234 scalar remainder. 3235 3236 [ ] <-- loop iteration number check. 3237 / | 3238 / v 3239 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3240 | / | 3241 | / v 3242 || [ ] <-- vector pre header. 3243 |/ | 3244 | v 3245 | [ ] \ 3246 | [ ]_| <-- vector loop (created during VPlan execution). 
3247 | | 3248 | v 3249 \ -[ ] <--- middle-block. 3250 \/ | 3251 /\ v 3252 | ->[ ] <--- new preheader. 3253 | | 3254 (opt) v <-- edge from middle to exit iff epilogue is not required. 3255 | [ ] \ 3256 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3257 \ | 3258 \ v 3259 >[ ] <-- exit block(s). 3260 ... 3261 */ 3262 3263 // Get the metadata of the original loop before it gets modified. 3264 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3265 3266 // Workaround! Compute the trip count of the original loop and cache it 3267 // before we start modifying the CFG. This code has a systemic problem 3268 // wherein it tries to run analysis over partially constructed IR; this is 3269 // wrong, and not simply for SCEV. The trip count of the original loop 3270 // simply happens to be prone to hitting this in practice. In theory, we 3271 // can hit the same issue for any SCEV, or ValueTracking query done during 3272 // mutation. See PR49900. 3273 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3274 3275 // Create an empty vector loop, and prepare basic blocks for the runtime 3276 // checks. 3277 createVectorLoopSkeleton(""); 3278 3279 // Now, compare the new count to zero. If it is zero skip the vector loop and 3280 // jump to the scalar loop. This check also covers the case where the 3281 // backedge-taken count is uint##_max: adding one to it will overflow leading 3282 // to an incorrect trip count of zero. In this (rare) case we will also jump 3283 // to the scalar loop. 3284 emitMinimumIterationCountCheck(LoopScalarPreHeader); 3285 3286 // Generate the code to check any assumptions that we've made for SCEV 3287 // expressions. 3288 emitSCEVChecks(LoopScalarPreHeader); 3289 3290 // Generate the code that checks in runtime if arrays overlap. We put the 3291 // checks into a separate block to make the more common case of few elements 3292 // faster. 3293 emitMemRuntimeChecks(LoopScalarPreHeader); 3294 3295 // Emit phis for the new starting index of the scalar loop. 3296 createInductionResumeValues(); 3297 3298 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3299 } 3300 3301 // Fix up external users of the induction variable. At this point, we are 3302 // in LCSSA form, with all external PHIs that use the IV having one input value, 3303 // coming from the remainder loop. We need those PHIs to also have a correct 3304 // value for the IV when arriving directly from the middle block. 3305 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3306 const InductionDescriptor &II, 3307 Value *CountRoundDown, Value *EndValue, 3308 BasicBlock *MiddleBlock, 3309 BasicBlock *VectorHeader) { 3310 // There are two kinds of external IV usages - those that use the value 3311 // computed in the last iteration (the PHI) and those that use the penultimate 3312 // value (the value that feeds into the phi from the loop latch). 3313 // We allow both, but they, obviously, have different values. 3314 3315 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3316 3317 DenseMap<Value *, Value *> MissingVals; 3318 3319 // An external user of the last iteration's value should see the value that 3320 // the remainder loop uses to initialize its own IV. 
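  // Illustrative example (assumed, not from the original source): for a loop
  // "for (i = 0; i < n; ++i)", the post-increment value of i equals the trip
  // count after the loop, while the phi itself holds trip count - 1. When the
  // exit is reached directly from the middle block, the former is EndValue
  // (the vector trip count) and the latter is recomputed below as
  // Start + Step * (CRD - 1).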
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");

      Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
                                    VectorHeader->getTerminator());
      Value *Escape =
          emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
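  // For example (illustrative; names assumed): if unrolling produced two
  // identical
  //   %gep = getelementptr inbounds i32, i32* %base, i64 %idx
  // instructions in the vector loop header, the second one is replaced by the
  // first and erased.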
3409 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3410 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3411 if (!CSEDenseMapInfo::canHandle(&In)) 3412 continue; 3413 3414 // Check if we can replace this instruction with any of the 3415 // visited instructions. 3416 if (Instruction *V = CSEMap.lookup(&In)) { 3417 In.replaceAllUsesWith(V); 3418 In.eraseFromParent(); 3419 continue; 3420 } 3421 3422 CSEMap[&In] = &In; 3423 } 3424 } 3425 3426 InstructionCost 3427 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3428 bool &NeedToScalarize) const { 3429 Function *F = CI->getCalledFunction(); 3430 Type *ScalarRetTy = CI->getType(); 3431 SmallVector<Type *, 4> Tys, ScalarTys; 3432 for (auto &ArgOp : CI->args()) 3433 ScalarTys.push_back(ArgOp->getType()); 3434 3435 // Estimate cost of scalarized vector call. The source operands are assumed 3436 // to be vectors, so we need to extract individual elements from there, 3437 // execute VF scalar calls, and then gather the result into the vector return 3438 // value. 3439 InstructionCost ScalarCallCost = 3440 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3441 if (VF.isScalar()) 3442 return ScalarCallCost; 3443 3444 // Compute corresponding vector type for return value and arguments. 3445 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3446 for (Type *ScalarTy : ScalarTys) 3447 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3448 3449 // Compute costs of unpacking argument values for the scalar calls and 3450 // packing the return values to a vector. 3451 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3452 3453 InstructionCost Cost = 3454 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3455 3456 // If we can't emit a vector call for this function, then the currently found 3457 // cost is the cost we need to return. 3458 NeedToScalarize = true; 3459 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3460 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3461 3462 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3463 return Cost; 3464 3465 // If the corresponding vector cost is cheaper, return its cost. 
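  // Worked example (illustrative numbers only): with VF = 4, a scalar call
  // cost of 10 and a scalarization overhead of 8, the scalarized cost is
  // 4 * 10 + 8 = 48; if a vector library variant exists with a cost of 20,
  // NeedToScalarize is cleared below and 20 is returned instead.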
3466 InstructionCost VectorCallCost = 3467 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3468 if (VectorCallCost < Cost) { 3469 NeedToScalarize = false; 3470 Cost = VectorCallCost; 3471 } 3472 return Cost; 3473 } 3474 3475 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3476 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3477 return Elt; 3478 return VectorType::get(Elt, VF); 3479 } 3480 3481 InstructionCost 3482 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3483 ElementCount VF) const { 3484 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3485 assert(ID && "Expected intrinsic call!"); 3486 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3487 FastMathFlags FMF; 3488 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3489 FMF = FPMO->getFastMathFlags(); 3490 3491 SmallVector<const Value *> Arguments(CI->args()); 3492 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3493 SmallVector<Type *> ParamTys; 3494 std::transform(FTy->param_begin(), FTy->param_end(), 3495 std::back_inserter(ParamTys), 3496 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3497 3498 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3499 dyn_cast<IntrinsicInst>(CI)); 3500 return TTI.getIntrinsicInstrCost(CostAttrs, 3501 TargetTransformInfo::TCK_RecipThroughput); 3502 } 3503 3504 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3505 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3506 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3507 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3508 } 3509 3510 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3511 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3512 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3513 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3514 } 3515 3516 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3517 // For every instruction `I` in MinBWs, truncate the operands, create a 3518 // truncated version of `I` and reextend its result. InstCombine runs 3519 // later and will remove any ext/trunc pairs. 3520 SmallPtrSet<Value *, 4> Erased; 3521 for (const auto &KV : Cost->getMinimalBitwidths()) { 3522 // If the value wasn't vectorized, we must maintain the original scalar 3523 // type. The absence of the value from State indicates that it 3524 // wasn't vectorized. 3525 // FIXME: Should not rely on getVPValue at this point. 3526 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3527 if (!State.hasAnyVectorValue(Def)) 3528 continue; 3529 for (unsigned Part = 0; Part < UF; ++Part) { 3530 Value *I = State.get(Def, Part); 3531 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3532 continue; 3533 Type *OriginalTy = I->getType(); 3534 Type *ScalarTruncatedTy = 3535 IntegerType::get(OriginalTy->getContext(), KV.second); 3536 auto *TruncatedTy = VectorType::get( 3537 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3538 if (TruncatedTy == OriginalTy) 3539 continue; 3540 3541 IRBuilder<> B(cast<Instruction>(I)); 3542 auto ShrinkOperand = [&](Value *V) -> Value * { 3543 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3544 if (ZI->getSrcTy() == TruncatedTy) 3545 return ZI->getOperand(0); 3546 return B.CreateZExtOrTrunc(V, TruncatedTy); 3547 }; 3548 3549 // The actual instruction modification depends on the instruction type, 3550 // unfortunately. 
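      // Illustrative sketch (assumed types, not from the original source): if
      // MinBWs records that an i32 add only needs 8 bits, the code below
      // roughly rebuilds it as
      //   %a.tr   = trunc <4 x i32> %a to <4 x i8>
      //   %b.tr   = trunc <4 x i32> %b to <4 x i8>
      //   %add.tr = add <4 x i8> %a.tr, %b.tr
      //   %res    = zext <4 x i8> %add.tr to <4 x i32>
      // and leaves the redundant ext/trunc pairs for InstCombine to remove.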
3551 Value *NewI = nullptr; 3552 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3553 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3554 ShrinkOperand(BO->getOperand(1))); 3555 3556 // Any wrapping introduced by shrinking this operation shouldn't be 3557 // considered undefined behavior. So, we can't unconditionally copy 3558 // arithmetic wrapping flags to NewI. 3559 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3560 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3561 NewI = 3562 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3563 ShrinkOperand(CI->getOperand(1))); 3564 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3565 NewI = B.CreateSelect(SI->getCondition(), 3566 ShrinkOperand(SI->getTrueValue()), 3567 ShrinkOperand(SI->getFalseValue())); 3568 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3569 switch (CI->getOpcode()) { 3570 default: 3571 llvm_unreachable("Unhandled cast!"); 3572 case Instruction::Trunc: 3573 NewI = ShrinkOperand(CI->getOperand(0)); 3574 break; 3575 case Instruction::SExt: 3576 NewI = B.CreateSExtOrTrunc( 3577 CI->getOperand(0), 3578 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3579 break; 3580 case Instruction::ZExt: 3581 NewI = B.CreateZExtOrTrunc( 3582 CI->getOperand(0), 3583 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3584 break; 3585 } 3586 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3587 auto Elements0 = 3588 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3589 auto *O0 = B.CreateZExtOrTrunc( 3590 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3591 auto Elements1 = 3592 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3593 auto *O1 = B.CreateZExtOrTrunc( 3594 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3595 3596 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3597 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3598 // Don't do anything with the operands, just extend the result. 3599 continue; 3600 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3601 auto Elements = 3602 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3603 auto *O0 = B.CreateZExtOrTrunc( 3604 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3605 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3606 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3607 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3608 auto Elements = 3609 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3610 auto *O0 = B.CreateZExtOrTrunc( 3611 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3612 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3613 } else { 3614 // If we don't know what to do, be conservative and don't do anything. 3615 continue; 3616 } 3617 3618 // Lastly, extend the result. 3619 NewI->takeName(cast<Instruction>(I)); 3620 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3621 I->replaceAllUsesWith(Res); 3622 cast<Instruction>(I)->eraseFromParent(); 3623 Erased.insert(I); 3624 State.reset(Def, Res, Part); 3625 } 3626 } 3627 3628 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3629 for (const auto &KV : Cost->getMinimalBitwidths()) { 3630 // If the value wasn't vectorized, we must maintain the original scalar 3631 // type. The absence of the value from State indicates that it 3632 // wasn't vectorized. 3633 // FIXME: Should not rely on getVPValue at this point. 
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF.isVector())
    truncateToMinimalBitwidths(State);

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs(State);
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs(State);

  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
  // If we inserted an edge from the middle block to the unique exit block,
  // update uses outside the loop (phis) to account for the newly inserted
  // edge.
  if (!Cost->requiresScalarEpilogue(VF)) {
    // Fix-up external users of the induction variables.
    for (auto &Entry : Legal->getInductionVars())
      fixupIVUsers(Entry.first, Entry.second,
                   getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
                   IVEndValues[Entry.first], LoopMiddleBlock,
                   VectorLoop->getHeader());

    fixLCSSAPHIs(State);
  }

  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(VectorLoop->getHeader());

  // Set/update profile weights for the vector and remainder loops as the
  // original loop iterations are now distributed among them. Note that the
  // original loop, represented by LoopScalarBody, becomes the remainder loop
  // after vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly imprecise result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that a possible bypass
  // of the vector code caused by legality checks is ignored, optimistically
  // assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
                               LI->getLoopFor(LoopScalarBody),
                               VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes.
  // At this point every instruction in the original loop is widened to a
  // vector form, so we can use the widened values to construct the incoming
  // edges.
  VPBasicBlock *Header =
      State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(
    VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop completes, we extract the next value of the
  // recurrence (x) to use as the initial value in the scalar loop.

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  VPValue *PreviousDef = PhiR->getBackedgeValue();
  Value *Incoming = State.get(PreviousDef, UF - 1);
  auto *ExtractForScalar = Incoming;
  auto *IdxTy = Builder.getInt32Ty();
  if (VF.isVector()) {
    auto *One = ConstantInt::get(IdxTy, 1);
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
                                                    "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the Phi is used
  // outside the loop. We need to extract the phi itself and not the last
  // element (the phi update in the current iteration). This will be the value
  // when jumping to the exit block from the LoopMiddleBlock, when the scalar
  // loop is not run at all.
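  // For example (illustrative, VF = 4, UF = 1): if the last widened value of
  // the recurrence is <s1, s2, s3, s4>, the scalar loop resumes with s4
  // (extracted above), while phis in the exit block use s3, the element
  // extracted below.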
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector()) {
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Idx, "vector.recur.extract.for.phi");
  } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from the middle block to
  // the exit block, and thus no phis that need updating.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
3862 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3863 3864 setDebugLocFromInst(LoopExitInst); 3865 3866 Type *PhiTy = OrigPhi->getType(); 3867 BasicBlock *VectorLoopLatch = 3868 LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch(); 3869 // If tail is folded by masking, the vector value to leave the loop should be 3870 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3871 // instead of the former. For an inloop reduction the reduction will already 3872 // be predicated, and does not need to be handled here. 3873 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 3874 for (unsigned Part = 0; Part < UF; ++Part) { 3875 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 3876 Value *Sel = nullptr; 3877 for (User *U : VecLoopExitInst->users()) { 3878 if (isa<SelectInst>(U)) { 3879 assert(!Sel && "Reduction exit feeding two selects"); 3880 Sel = U; 3881 } else 3882 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3883 } 3884 assert(Sel && "Reduction exit feeds no select"); 3885 State.reset(LoopExitInstDef, Sel, Part); 3886 3887 // If the target can create a predicated operator for the reduction at no 3888 // extra cost in the loop (for example a predicated vadd), it can be 3889 // cheaper for the select to remain in the loop than be sunk out of it, 3890 // and so use the select value for the phi instead of the old 3891 // LoopExitValue. 3892 if (PreferPredicatedReductionSelect || 3893 TTI->preferPredicatedReductionSelect( 3894 RdxDesc.getOpcode(), PhiTy, 3895 TargetTransformInfo::ReductionFlags())) { 3896 auto *VecRdxPhi = 3897 cast<PHINode>(State.get(PhiR, Part)); 3898 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3899 } 3900 } 3901 } 3902 3903 // If the vector reduction can be performed in a smaller type, we truncate 3904 // then extend the loop exit value to enable InstCombine to evaluate the 3905 // entire expression in the smaller type. 3906 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3907 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3908 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3909 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3910 VectorParts RdxParts(UF); 3911 for (unsigned Part = 0; Part < UF; ++Part) { 3912 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3913 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3914 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3915 : Builder.CreateZExt(Trunc, VecTy); 3916 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3917 if (U != Trunc) { 3918 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3919 RdxParts[Part] = Extnd; 3920 } 3921 } 3922 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3923 for (unsigned Part = 0; Part < UF; ++Part) { 3924 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3925 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3926 } 3927 } 3928 3929 // Reduce all of the unrolled parts into a single vector. 3930 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3931 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3932 3933 // The middle block terminator has already been assigned a DebugLoc here (the 3934 // OrigLoop's single latch terminator). 
  // We want the whole middle block to appear to execute on this line because:
  // (a) it is all compiler generated, (b) these instructions are always
  // executed after evaluating the latch conditional branch, and (c) other
  // passes may add new predecessors which terminate on this line. This is the
  // easiest way to ensure we don't accidentally cause an extra step back into
  // the loop while debugging.
  setDebugLocFromInst(LoopMiddleBlock->getTerminator());
  if (PhiR->isOrdered())
    ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
  else {
    // Floating-point operations should have some FMF to enable the reduction.
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
    for (unsigned Part = 1; Part < UF; ++Part) {
      Value *RdxPart = State.get(LoopExitInstDef, Part);
      if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
        ReducedPartRdx = Builder.CreateBinOp(
            (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
      } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
        ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
                                           ReducedPartRdx, RdxPart);
      else
        ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
    }
  }

  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
  if (VF.isVector() && !PhiR->isInLoop()) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  PHINode *ResumePhi =
      dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());

  // If we are fixing reductions in the epilogue loop then we should already
  // have created a bc.merge.rdx Phi after the main vector body. Ensure that
  // we carry over the incoming values correctly.
  for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
    if (Incoming == LoopMiddleBlock)
      BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
    else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
      BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
                              Incoming);
    else
      BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
  }

  // Set the resume value for this reduction.
  ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
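  // Illustrative IR (names assumed, not from the original source): an exit
  // block phi such as
  //   %sum.lcssa = phi i32 [ %sum.next, %loop.latch ]
  // gains a second incoming value from the middle block,
  //   %sum.lcssa = phi i32 [ %sum.next, %loop.latch ], [ %rdx, %middle.block ]
  // where %rdx stands for ReducedPartRdx computed above.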
4003 if (!Cost->requiresScalarEpilogue(VF)) 4004 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4005 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4006 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4007 4008 // Fix the scalar loop reduction variable with the incoming reduction sum 4009 // from the vector body and from the backedge value. 4010 int IncomingEdgeBlockIdx = 4011 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4012 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4013 // Pick the other block. 4014 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4015 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4016 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4017 } 4018 4019 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4020 VPTransformState &State) { 4021 RecurKind RK = RdxDesc.getRecurrenceKind(); 4022 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4023 return; 4024 4025 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4026 assert(LoopExitInstr && "null loop exit instruction"); 4027 SmallVector<Instruction *, 8> Worklist; 4028 SmallPtrSet<Instruction *, 8> Visited; 4029 Worklist.push_back(LoopExitInstr); 4030 Visited.insert(LoopExitInstr); 4031 4032 while (!Worklist.empty()) { 4033 Instruction *Cur = Worklist.pop_back_val(); 4034 if (isa<OverflowingBinaryOperator>(Cur)) 4035 for (unsigned Part = 0; Part < UF; ++Part) { 4036 // FIXME: Should not rely on getVPValue at this point. 4037 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4038 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4039 } 4040 4041 for (User *U : Cur->users()) { 4042 Instruction *UI = cast<Instruction>(U); 4043 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4044 Visited.insert(UI).second) 4045 Worklist.push_back(UI); 4046 } 4047 } 4048 } 4049 4050 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4051 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4052 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4053 // Some phis were already hand updated by the reduction and recurrence 4054 // code above, leave them alone. 4055 continue; 4056 4057 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4058 // Non-instruction incoming values will have only one value. 4059 4060 VPLane Lane = VPLane::getFirstLane(); 4061 if (isa<Instruction>(IncomingValue) && 4062 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4063 VF)) 4064 Lane = VPLane::getLastLaneForVF(VF); 4065 4066 // Can be a loop invariant incoming value or the last scalar value to be 4067 // extracted from the vectorized loop. 4068 // FIXME: Should not rely on getVPValue at this point. 4069 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4070 Value *lastIncomingValue = 4071 OrigLoop->isLoopInvariant(IncomingValue) 4072 ? IncomingValue 4073 : State.get(State.Plan->getVPValue(IncomingValue, true), 4074 VPIteration(UF - 1, Lane)); 4075 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4076 } 4077 } 4078 4079 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4080 // The basic block and loop containing the predicated instruction. 4081 auto *PredBB = PredInst->getParent(); 4082 auto *VectorLoop = LI->getLoopFor(PredBB); 4083 4084 // Initialize a worklist with the operands of the predicated instruction. 
4085 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4086
4087 // Holds instructions that we need to analyze again. An instruction may be
4088 // reanalyzed if we don't yet know if we can sink it or not.
4089 SmallVector<Instruction *, 8> InstsToReanalyze;
4090
4091 // Returns true if a given use occurs in the predicated block. Phi nodes use
4092 // their operands in their corresponding predecessor blocks.
4093 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4094 auto *I = cast<Instruction>(U.getUser());
4095 BasicBlock *BB = I->getParent();
4096 if (auto *Phi = dyn_cast<PHINode>(I))
4097 BB = Phi->getIncomingBlock(
4098 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4099 return BB == PredBB;
4100 };
4101
4102 // Iteratively sink the scalarized operands of the predicated instruction
4103 // into the block we created for it. When an instruction is sunk, its
4104 // operands are then added to the worklist. The algorithm ends once a full
4105 // pass through the worklist fails to sink a single instruction.
4106 bool Changed;
4107 do {
4108 // Add the instructions that need to be reanalyzed to the worklist, and
4109 // reset the changed indicator.
4110 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4111 InstsToReanalyze.clear();
4112 Changed = false;
4113
4114 while (!Worklist.empty()) {
4115 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4116
4117 // We can't sink an instruction if it is a phi node, is not in the loop,
4118 // or may have side effects.
4119 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4120 I->mayHaveSideEffects())
4121 continue;
4122
4123 // If the instruction is already in PredBB, check if we can sink its
4124 // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4125 // sinking the scalar instruction I, hence it appears in PredBB; but it
4126 // may have failed to sink I's operands (recursively), which we try
4127 // (again) here.
4128 if (I->getParent() == PredBB) {
4129 Worklist.insert(I->op_begin(), I->op_end());
4130 continue;
4131 }
4132
4133 // It's legal to sink the instruction if all its uses occur in the
4134 // predicated block. Otherwise, there's nothing to do yet, and we may
4135 // need to reanalyze the instruction.
4136 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4137 InstsToReanalyze.push_back(I);
4138 continue;
4139 }
4140
4141 // Move the instruction to the beginning of the predicated block, and add
4142 // its operands to the worklist.
4143 I->moveBefore(&*PredBB->getFirstInsertionPt());
4144 Worklist.insert(I->op_begin(), I->op_end());
4145
4146 // The sinking may have enabled other instructions to be sunk, so we will
4147 // need to iterate.
4148 Changed = true;
4149 }
4150 } while (Changed);
4151 }
4152
4153 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4154 for (PHINode *OrigPhi : OrigPHIsToFix) {
4155 VPWidenPHIRecipe *VPPhi =
4156 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4157 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4158 // Make sure the builder has a valid insert point.
4159 Builder.SetInsertPoint(NewPhi); 4160 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4161 VPValue *Inc = VPPhi->getIncomingValue(i); 4162 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4163 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4164 } 4165 } 4166 } 4167 4168 bool InnerLoopVectorizer::useOrderedReductions( 4169 const RecurrenceDescriptor &RdxDesc) { 4170 return Cost->useOrderedReductions(RdxDesc); 4171 } 4172 4173 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4174 VPWidenPHIRecipe *PhiR, 4175 VPTransformState &State) { 4176 assert(EnableVPlanNativePath && 4177 "Non-native vplans are not expected to have VPWidenPHIRecipes."); 4178 // Currently we enter here in the VPlan-native path for non-induction 4179 // PHIs where all control flow is uniform. We simply widen these PHIs. 4180 // Create a vector phi with no operands - the vector phi operands will be 4181 // set at the end of vector code generation. 4182 Type *VecTy = (State.VF.isScalar()) 4183 ? PN->getType() 4184 : VectorType::get(PN->getType(), State.VF); 4185 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4186 State.set(PhiR, VecPhi, 0); 4187 OrigPHIsToFix.push_back(cast<PHINode>(PN)); 4188 } 4189 4190 /// A helper function for checking whether an integer division-related 4191 /// instruction may divide by zero (in which case it must be predicated if 4192 /// executed conditionally in the scalar code). 4193 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4194 /// Non-zero divisors that are non compile-time constants will not be 4195 /// converted into multiplication, so we will still end up scalarizing 4196 /// the division, but can do so w/o predication. 4197 static bool mayDivideByZero(Instruction &I) { 4198 assert((I.getOpcode() == Instruction::UDiv || 4199 I.getOpcode() == Instruction::SDiv || 4200 I.getOpcode() == Instruction::URem || 4201 I.getOpcode() == Instruction::SRem) && 4202 "Unexpected instruction"); 4203 Value *Divisor = I.getOperand(1); 4204 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4205 return !CInt || CInt->isZero(); 4206 } 4207 4208 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4209 VPUser &ArgOperands, 4210 VPTransformState &State) { 4211 assert(!isa<DbgInfoIntrinsic>(I) && 4212 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4213 setDebugLocFromInst(&I); 4214 4215 Module *M = I.getParent()->getParent()->getParent(); 4216 auto *CI = cast<CallInst>(&I); 4217 4218 SmallVector<Type *, 4> Tys; 4219 for (Value *ArgOperand : CI->args()) 4220 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4221 4222 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4223 4224 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4225 // version of the instruction. 4226 // Is it beneficial to perform intrinsic call compared to lib call? 4227 bool NeedToScalarize = false; 4228 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4229 InstructionCost IntrinsicCost = ID ? 
Cost->getVectorIntrinsicCost(CI, VF) : 0; 4230 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4231 assert((UseVectorIntrinsic || !NeedToScalarize) && 4232 "Instruction should be scalarized elsewhere."); 4233 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4234 "Either the intrinsic cost or vector call cost must be valid"); 4235 4236 for (unsigned Part = 0; Part < UF; ++Part) { 4237 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4238 SmallVector<Value *, 4> Args; 4239 for (auto &I : enumerate(ArgOperands.operands())) { 4240 // Some intrinsics have a scalar argument - don't replace it with a 4241 // vector. 4242 Value *Arg; 4243 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4244 Arg = State.get(I.value(), Part); 4245 else { 4246 Arg = State.get(I.value(), VPIteration(0, 0)); 4247 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4248 TysForDecl.push_back(Arg->getType()); 4249 } 4250 Args.push_back(Arg); 4251 } 4252 4253 Function *VectorF; 4254 if (UseVectorIntrinsic) { 4255 // Use vector version of the intrinsic. 4256 if (VF.isVector()) 4257 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4258 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4259 assert(VectorF && "Can't retrieve vector intrinsic."); 4260 } else { 4261 // Use vector version of the function call. 4262 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4263 #ifndef NDEBUG 4264 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4265 "Can't create vector function."); 4266 #endif 4267 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4268 } 4269 SmallVector<OperandBundleDef, 1> OpBundles; 4270 CI->getOperandBundlesAsDefs(OpBundles); 4271 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4272 4273 if (isa<FPMathOperator>(V)) 4274 V->copyFastMathFlags(CI); 4275 4276 State.set(Def, V, Part); 4277 addMetadata(V, &I); 4278 } 4279 } 4280 4281 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4282 // We should not collect Scalars more than once per VF. Right now, this 4283 // function is called from collectUniformsAndScalars(), which already does 4284 // this check. Collecting Scalars for VF=1 does not make any sense. 4285 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4286 "This function should not be visited twice for the same VF"); 4287 4288 // This avoids any chances of creating a REPLICATE recipe during planning 4289 // since that would result in generation of scalarized code during execution, 4290 // which is not supported for scalable vectors. 4291 if (VF.isScalable()) { 4292 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4293 return; 4294 } 4295 4296 SmallSetVector<Instruction *, 8> Worklist; 4297 4298 // These sets are used to seed the analysis with pointers used by memory 4299 // accesses that will remain scalar. 4300 SmallSetVector<Instruction *, 8> ScalarPtrs; 4301 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4302 auto *Latch = TheLoop->getLoopLatch(); 4303 4304 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4305 // The pointer operands of loads and stores will be scalar as long as the 4306 // memory access is not a gather or scatter operation. The value operand of a 4307 // store will remain scalar if the store is scalarized. 
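// For example (illustrative only): for a unit-stride "store i32 %v, i32* %p"
// that gets widened, %p is still a scalar use, since the wide store only
// needs a single scalar base pointer, while %v is not; %v only remains
// scalar if the widening decision for the store is CM_Scalarize.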
4308 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4309 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4310 assert(WideningDecision != CM_Unknown &&
4311 "Widening decision should be ready at this moment");
4312 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4313 if (Ptr == Store->getValueOperand())
4314 return WideningDecision == CM_Scalarize;
4315 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4316 "Ptr is neither a value nor a pointer operand");
4317 return WideningDecision != CM_GatherScatter;
4318 };
4319
4320 // A helper that returns true if the given value is a bitcast or
4321 // getelementptr instruction contained in the loop.
4322 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4323 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4324 isa<GetElementPtrInst>(V)) &&
4325 !TheLoop->isLoopInvariant(V);
4326 };
4327
4328 // A helper that evaluates a memory access's use of a pointer. If the use will
4329 // be a scalar use and the pointer is only used by memory accesses, we place
4330 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4331 // PossibleNonScalarPtrs.
4332 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4333 // We only care about bitcast and getelementptr instructions contained in
4334 // the loop.
4335 if (!isLoopVaryingBitCastOrGEP(Ptr))
4336 return;
4337
4338 // If the pointer has already been identified as scalar (e.g., if it was
4339 // also identified as uniform), there's nothing to do.
4340 auto *I = cast<Instruction>(Ptr);
4341 if (Worklist.count(I))
4342 return;
4343
4344 // If the use of the pointer will be a scalar use, and all users of the
4345 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4346 // place the pointer in PossibleNonScalarPtrs.
4347 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4348 return isa<LoadInst>(U) || isa<StoreInst>(U);
4349 }))
4350 ScalarPtrs.insert(I);
4351 else
4352 PossibleNonScalarPtrs.insert(I);
4353 };
4354
4355 // We seed the scalars analysis with two classes of instructions: (1)
4356 // instructions marked uniform-after-vectorization and (2) bitcast,
4357 // getelementptr and (pointer) phi instructions used by memory accesses
4358 // requiring a scalar use.
4359 //
4360 // (1) Add to the worklist all instructions that have been identified as
4361 // uniform-after-vectorization.
4362 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4363
4364 // (2) Add to the worklist all bitcast and getelementptr instructions used by
4365 // memory accesses requiring a scalar use. The pointer operands of loads and
4366 // stores will be scalar as long as the memory access is not a gather or
4367 // scatter operation. The value operand of a store will remain scalar if the
4368 // store is scalarized.
4369 for (auto *BB : TheLoop->blocks())
4370 for (auto &I : *BB) {
4371 if (auto *Load = dyn_cast<LoadInst>(&I)) {
4372 evaluatePtrUse(Load, Load->getPointerOperand());
4373 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4374 evaluatePtrUse(Store, Store->getPointerOperand());
4375 evaluatePtrUse(Store, Store->getValueOperand());
4376 }
4377 }
4378 for (auto *I : ScalarPtrs)
4379 if (!PossibleNonScalarPtrs.count(I)) {
4380 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4381 Worklist.insert(I);
4382 }
4383
4384 // Insert the forced scalars.
4385 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4386 // induction variable when the PHI user is scalarized. 4387 auto ForcedScalar = ForcedScalars.find(VF); 4388 if (ForcedScalar != ForcedScalars.end()) 4389 for (auto *I : ForcedScalar->second) 4390 Worklist.insert(I); 4391 4392 // Expand the worklist by looking through any bitcasts and getelementptr 4393 // instructions we've already identified as scalar. This is similar to the 4394 // expansion step in collectLoopUniforms(); however, here we're only 4395 // expanding to include additional bitcasts and getelementptr instructions. 4396 unsigned Idx = 0; 4397 while (Idx != Worklist.size()) { 4398 Instruction *Dst = Worklist[Idx++]; 4399 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4400 continue; 4401 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4402 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4403 auto *J = cast<Instruction>(U); 4404 return !TheLoop->contains(J) || Worklist.count(J) || 4405 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4406 isScalarUse(J, Src)); 4407 })) { 4408 Worklist.insert(Src); 4409 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4410 } 4411 } 4412 4413 // An induction variable will remain scalar if all users of the induction 4414 // variable and induction variable update remain scalar. 4415 for (auto &Induction : Legal->getInductionVars()) { 4416 auto *Ind = Induction.first; 4417 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4418 4419 // If tail-folding is applied, the primary induction variable will be used 4420 // to feed a vector compare. 4421 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4422 continue; 4423 4424 // Returns true if \p Indvar is a pointer induction that is used directly by 4425 // load/store instruction \p I. 4426 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4427 Instruction *I) { 4428 return Induction.second.getKind() == 4429 InductionDescriptor::IK_PtrInduction && 4430 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4431 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4432 }; 4433 4434 // Determine if all users of the induction variable are scalar after 4435 // vectorization. 4436 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4437 auto *I = cast<Instruction>(U); 4438 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4439 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4440 }); 4441 if (!ScalarInd) 4442 continue; 4443 4444 // Determine if all users of the induction variable update instruction are 4445 // scalar after vectorization. 4446 auto ScalarIndUpdate = 4447 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4448 auto *I = cast<Instruction>(U); 4449 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4450 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4451 }); 4452 if (!ScalarIndUpdate) 4453 continue; 4454 4455 // The induction variable and its update instruction will remain scalar. 
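// For instance (a sketch, not tied to a particular test case): in
//   for (i = 0; i < n; ++i) a[i] = 42;
// where i is only used by the address computation of a consecutive store and
// by its own increment, both the phi for i and the increment can stay scalar
// instead of being widened into a vector induction.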
4456 Worklist.insert(Ind); 4457 Worklist.insert(IndUpdate); 4458 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4459 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4460 << "\n"); 4461 } 4462 4463 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4464 } 4465 4466 bool LoopVectorizationCostModel::isScalarWithPredication( 4467 Instruction *I, ElementCount VF) const { 4468 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4469 return false; 4470 switch(I->getOpcode()) { 4471 default: 4472 break; 4473 case Instruction::Load: 4474 case Instruction::Store: { 4475 if (!Legal->isMaskRequired(I)) 4476 return false; 4477 auto *Ptr = getLoadStorePointerOperand(I); 4478 auto *Ty = getLoadStoreType(I); 4479 Type *VTy = Ty; 4480 if (VF.isVector()) 4481 VTy = VectorType::get(Ty, VF); 4482 const Align Alignment = getLoadStoreAlignment(I); 4483 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4484 TTI.isLegalMaskedGather(VTy, Alignment)) 4485 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4486 TTI.isLegalMaskedScatter(VTy, Alignment)); 4487 } 4488 case Instruction::UDiv: 4489 case Instruction::SDiv: 4490 case Instruction::SRem: 4491 case Instruction::URem: 4492 return mayDivideByZero(*I); 4493 } 4494 return false; 4495 } 4496 4497 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4498 Instruction *I, ElementCount VF) { 4499 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4500 assert(getWideningDecision(I, VF) == CM_Unknown && 4501 "Decision should not be set yet."); 4502 auto *Group = getInterleavedAccessGroup(I); 4503 assert(Group && "Must have a group."); 4504 4505 // If the instruction's allocated size doesn't equal it's type size, it 4506 // requires padding and will be scalarized. 4507 auto &DL = I->getModule()->getDataLayout(); 4508 auto *ScalarTy = getLoadStoreType(I); 4509 if (hasIrregularType(ScalarTy, DL)) 4510 return false; 4511 4512 // If the group involves a non-integral pointer, we may not be able to 4513 // losslessly cast all values to a common type. 4514 unsigned InterleaveFactor = Group->getFactor(); 4515 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy); 4516 for (unsigned i = 0; i < InterleaveFactor; i++) { 4517 Instruction *Member = Group->getMember(i); 4518 if (!Member) 4519 continue; 4520 auto *MemberTy = getLoadStoreType(Member); 4521 bool MemberNI = DL.isNonIntegralPointerType(MemberTy); 4522 // Don't coerce non-integral pointers to integers or vice versa. 4523 if (MemberNI != ScalarNI) { 4524 // TODO: Consider adding special nullptr value case here 4525 return false; 4526 } else if (MemberNI && ScalarNI && 4527 ScalarTy->getPointerAddressSpace() != 4528 MemberTy->getPointerAddressSpace()) { 4529 return false; 4530 } 4531 } 4532 4533 // Check if masking is required. 4534 // A Group may need masking for one of two reasons: it resides in a block that 4535 // needs predication, or it was decided to use masking to deal with gaps 4536 // (either a gap at the end of a load-access that may result in a speculative 4537 // load, or any gaps in a store-access). 
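// Illustrative example (assumed access pattern): for an interleave group
// touching only A[3*i] and A[3*i+1] out of a factor-3 group, a store group
// has a missing member (a gap) and so requires masking, while a load group
// whose gap sits at the end may read past the last element and needs either
// a scalar epilogue or epilogue masking.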
4538 bool PredicatedAccessRequiresMasking = 4539 blockNeedsPredicationForAnyReason(I->getParent()) && 4540 Legal->isMaskRequired(I); 4541 bool LoadAccessWithGapsRequiresEpilogMasking = 4542 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4543 !isScalarEpilogueAllowed(); 4544 bool StoreAccessWithGapsRequiresMasking = 4545 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4546 if (!PredicatedAccessRequiresMasking && 4547 !LoadAccessWithGapsRequiresEpilogMasking && 4548 !StoreAccessWithGapsRequiresMasking) 4549 return true; 4550 4551 // If masked interleaving is required, we expect that the user/target had 4552 // enabled it, because otherwise it either wouldn't have been created or 4553 // it should have been invalidated by the CostModel. 4554 assert(useMaskedInterleavedAccesses(TTI) && 4555 "Masked interleave-groups for predicated accesses are not enabled."); 4556 4557 if (Group->isReverse()) 4558 return false; 4559 4560 auto *Ty = getLoadStoreType(I); 4561 const Align Alignment = getLoadStoreAlignment(I); 4562 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4563 : TTI.isLegalMaskedStore(Ty, Alignment); 4564 } 4565 4566 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4567 Instruction *I, ElementCount VF) { 4568 // Get and ensure we have a valid memory instruction. 4569 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4570 4571 auto *Ptr = getLoadStorePointerOperand(I); 4572 auto *ScalarTy = getLoadStoreType(I); 4573 4574 // In order to be widened, the pointer should be consecutive, first of all. 4575 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4576 return false; 4577 4578 // If the instruction is a store located in a predicated block, it will be 4579 // scalarized. 4580 if (isScalarWithPredication(I, VF)) 4581 return false; 4582 4583 // If the instruction's allocated size doesn't equal it's type size, it 4584 // requires padding and will be scalarized. 4585 auto &DL = I->getModule()->getDataLayout(); 4586 if (hasIrregularType(ScalarTy, DL)) 4587 return false; 4588 4589 return true; 4590 } 4591 4592 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4593 // We should not collect Uniforms more than once per VF. Right now, 4594 // this function is called from collectUniformsAndScalars(), which 4595 // already does this check. Collecting Uniforms for VF=1 does not make any 4596 // sense. 4597 4598 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4599 "This function should not be visited twice for the same VF"); 4600 4601 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4602 // not analyze again. Uniforms.count(VF) will return 1. 4603 Uniforms[VF].clear(); 4604 4605 // We now know that the loop is vectorizable! 4606 // Collect instructions inside the loop that will remain uniform after 4607 // vectorization. 4608 4609 // Global values, params and instructions outside of current loop are out of 4610 // scope. 4611 auto isOutOfScope = [&](Value *V) -> bool { 4612 Instruction *I = dyn_cast<Instruction>(V); 4613 return (!I || !TheLoop->contains(I)); 4614 }; 4615 4616 // Worklist containing uniform instructions demanding lane 0. 4617 SetVector<Instruction *> Worklist; 4618 BasicBlock *Latch = TheLoop->getLoopLatch(); 4619 4620 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4621 // that are scalar with predication must not be considered uniform after 4622 // vectorization, because that would create an erroneous replicating region 4623 // where only a single instance out of VF should be formed. 4624 // TODO: optimize such seldom cases if found important, see PR40816. 4625 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4626 if (isOutOfScope(I)) { 4627 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4628 << *I << "\n"); 4629 return; 4630 } 4631 if (isScalarWithPredication(I, VF)) { 4632 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4633 << *I << "\n"); 4634 return; 4635 } 4636 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4637 Worklist.insert(I); 4638 }; 4639 4640 // Start with the conditional branch. If the branch condition is an 4641 // instruction contained in the loop that is only used by the branch, it is 4642 // uniform. 4643 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4644 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4645 addToWorklistIfAllowed(Cmp); 4646 4647 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4648 InstWidening WideningDecision = getWideningDecision(I, VF); 4649 assert(WideningDecision != CM_Unknown && 4650 "Widening decision should be ready at this moment"); 4651 4652 // A uniform memory op is itself uniform. We exclude uniform stores 4653 // here as they demand the last lane, not the first one. 4654 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4655 assert(WideningDecision == CM_Scalarize); 4656 return true; 4657 } 4658 4659 return (WideningDecision == CM_Widen || 4660 WideningDecision == CM_Widen_Reverse || 4661 WideningDecision == CM_Interleave); 4662 }; 4663 4664 4665 // Returns true if Ptr is the pointer operand of a memory access instruction 4666 // I, and I is known to not require scalarization. 4667 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4668 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4669 }; 4670 4671 // Holds a list of values which are known to have at least one uniform use. 4672 // Note that there may be other uses which aren't uniform. A "uniform use" 4673 // here is something which only demands lane 0 of the unrolled iterations; 4674 // it does not imply that all lanes produce the same value (e.g. this is not 4675 // the usual meaning of uniform) 4676 SetVector<Value *> HasUniformUse; 4677 4678 // Scan the loop for instructions which are either a) known to have only 4679 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4680 for (auto *BB : TheLoop->blocks()) 4681 for (auto &I : *BB) { 4682 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4683 switch (II->getIntrinsicID()) { 4684 case Intrinsic::sideeffect: 4685 case Intrinsic::experimental_noalias_scope_decl: 4686 case Intrinsic::assume: 4687 case Intrinsic::lifetime_start: 4688 case Intrinsic::lifetime_end: 4689 if (TheLoop->hasLoopInvariantOperands(&I)) 4690 addToWorklistIfAllowed(&I); 4691 break; 4692 default: 4693 break; 4694 } 4695 } 4696 4697 // ExtractValue instructions must be uniform, because the operands are 4698 // known to be loop-invariant. 
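// E.g. (illustrative): %lo = extractvalue { i64, i1 } %pair, 0, where %pair
// is defined outside the loop, yields the same value on every iteration, so
// only lane 0 is ever demanded.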
4699 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4700 assert(isOutOfScope(EVI->getAggregateOperand()) && 4701 "Expected aggregate value to be loop invariant"); 4702 addToWorklistIfAllowed(EVI); 4703 continue; 4704 } 4705 4706 // If there's no pointer operand, there's nothing to do. 4707 auto *Ptr = getLoadStorePointerOperand(&I); 4708 if (!Ptr) 4709 continue; 4710 4711 // A uniform memory op is itself uniform. We exclude uniform stores 4712 // here as they demand the last lane, not the first one. 4713 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4714 addToWorklistIfAllowed(&I); 4715 4716 if (isUniformDecision(&I, VF)) { 4717 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4718 HasUniformUse.insert(Ptr); 4719 } 4720 } 4721 4722 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4723 // demanding) users. Since loops are assumed to be in LCSSA form, this 4724 // disallows uses outside the loop as well. 4725 for (auto *V : HasUniformUse) { 4726 if (isOutOfScope(V)) 4727 continue; 4728 auto *I = cast<Instruction>(V); 4729 auto UsersAreMemAccesses = 4730 llvm::all_of(I->users(), [&](User *U) -> bool { 4731 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4732 }); 4733 if (UsersAreMemAccesses) 4734 addToWorklistIfAllowed(I); 4735 } 4736 4737 // Expand Worklist in topological order: whenever a new instruction 4738 // is added , its users should be already inside Worklist. It ensures 4739 // a uniform instruction will only be used by uniform instructions. 4740 unsigned idx = 0; 4741 while (idx != Worklist.size()) { 4742 Instruction *I = Worklist[idx++]; 4743 4744 for (auto OV : I->operand_values()) { 4745 // isOutOfScope operands cannot be uniform instructions. 4746 if (isOutOfScope(OV)) 4747 continue; 4748 // First order recurrence Phi's should typically be considered 4749 // non-uniform. 4750 auto *OP = dyn_cast<PHINode>(OV); 4751 if (OP && Legal->isFirstOrderRecurrence(OP)) 4752 continue; 4753 // If all the users of the operand are uniform, then add the 4754 // operand into the uniform worklist. 4755 auto *OI = cast<Instruction>(OV); 4756 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4757 auto *J = cast<Instruction>(U); 4758 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4759 })) 4760 addToWorklistIfAllowed(OI); 4761 } 4762 } 4763 4764 // For an instruction to be added into Worklist above, all its users inside 4765 // the loop should also be in Worklist. However, this condition cannot be 4766 // true for phi nodes that form a cyclic dependence. We must process phi 4767 // nodes separately. An induction variable will remain uniform if all users 4768 // of the induction variable and induction variable update remain uniform. 4769 // The code below handles both pointer and non-pointer induction variables. 4770 for (auto &Induction : Legal->getInductionVars()) { 4771 auto *Ind = Induction.first; 4772 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4773 4774 // Determine if all users of the induction variable are uniform after 4775 // vectorization. 4776 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4777 auto *I = cast<Instruction>(U); 4778 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4779 isVectorizedMemAccessUse(I, Ind); 4780 }); 4781 if (!UniformInd) 4782 continue; 4783 4784 // Determine if all users of the induction variable update instruction are 4785 // uniform after vectorization. 
4786 auto UniformIndUpdate = 4787 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4788 auto *I = cast<Instruction>(U); 4789 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4790 isVectorizedMemAccessUse(I, IndUpdate); 4791 }); 4792 if (!UniformIndUpdate) 4793 continue; 4794 4795 // The induction variable and its update instruction will remain uniform. 4796 addToWorklistIfAllowed(Ind); 4797 addToWorklistIfAllowed(IndUpdate); 4798 } 4799 4800 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4801 } 4802 4803 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4804 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4805 4806 if (Legal->getRuntimePointerChecking()->Need) { 4807 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4808 "runtime pointer checks needed. Enable vectorization of this " 4809 "loop with '#pragma clang loop vectorize(enable)' when " 4810 "compiling with -Os/-Oz", 4811 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4812 return true; 4813 } 4814 4815 if (!PSE.getPredicate().isAlwaysTrue()) { 4816 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4817 "runtime SCEV checks needed. Enable vectorization of this " 4818 "loop with '#pragma clang loop vectorize(enable)' when " 4819 "compiling with -Os/-Oz", 4820 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4821 return true; 4822 } 4823 4824 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4825 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4826 reportVectorizationFailure("Runtime stride check for small trip count", 4827 "runtime stride == 1 checks needed. Enable vectorization of " 4828 "this loop without such check by compiling with -Os/-Oz", 4829 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4830 return true; 4831 } 4832 4833 return false; 4834 } 4835 4836 ElementCount 4837 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4838 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4839 return ElementCount::getScalable(0); 4840 4841 if (Hints->isScalableVectorizationDisabled()) { 4842 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4843 "ScalableVectorizationDisabled", ORE, TheLoop); 4844 return ElementCount::getScalable(0); 4845 } 4846 4847 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4848 4849 auto MaxScalableVF = ElementCount::getScalable( 4850 std::numeric_limits<ElementCount::ScalarTy>::max()); 4851 4852 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4853 // FIXME: While for scalable vectors this is currently sufficient, this should 4854 // be replaced by a more detailed mechanism that filters out specific VFs, 4855 // instead of invalidating vectorization for a whole set of VFs based on the 4856 // MaxVF. 4857 4858 // Disable scalable vectorization if the loop contains unsupported reductions. 4859 if (!canVectorizeReductions(MaxScalableVF)) { 4860 reportVectorizationInfo( 4861 "Scalable vectorization not supported for the reduction " 4862 "operations found in this loop.", 4863 "ScalableVFUnfeasible", ORE, TheLoop); 4864 return ElementCount::getScalable(0); 4865 } 4866 4867 // Disable scalable vectorization if the loop contains any instructions 4868 // with element types not supported for scalable vectors. 
4869 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4870 return !Ty->isVoidTy() && 4871 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4872 })) { 4873 reportVectorizationInfo("Scalable vectorization is not supported " 4874 "for all element types found in this loop.", 4875 "ScalableVFUnfeasible", ORE, TheLoop); 4876 return ElementCount::getScalable(0); 4877 } 4878 4879 if (Legal->isSafeForAnyVectorWidth()) 4880 return MaxScalableVF; 4881 4882 // Limit MaxScalableVF by the maximum safe dependence distance. 4883 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4884 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4885 MaxVScale = 4886 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4887 MaxScalableVF = ElementCount::getScalable( 4888 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 4889 if (!MaxScalableVF) 4890 reportVectorizationInfo( 4891 "Max legal vector width too small, scalable vectorization " 4892 "unfeasible.", 4893 "ScalableVFUnfeasible", ORE, TheLoop); 4894 4895 return MaxScalableVF; 4896 } 4897 4898 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4899 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4900 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4901 unsigned SmallestType, WidestType; 4902 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4903 4904 // Get the maximum safe dependence distance in bits computed by LAA. 4905 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4906 // the memory accesses that is most restrictive (involved in the smallest 4907 // dependence distance). 4908 unsigned MaxSafeElements = 4909 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4910 4911 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4912 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4913 4914 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4915 << ".\n"); 4916 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4917 << ".\n"); 4918 4919 // First analyze the UserVF, fall back if the UserVF should be ignored. 4920 if (UserVF) { 4921 auto MaxSafeUserVF = 4922 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4923 4924 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4925 // If `VF=vscale x N` is safe, then so is `VF=N` 4926 if (UserVF.isScalable()) 4927 return FixedScalableVFPair( 4928 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4929 else 4930 return UserVF; 4931 } 4932 4933 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4934 4935 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4936 // is better to ignore the hint and let the compiler choose a suitable VF. 
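// For example (hypothetical hint values): with a maximum safe fixed VF of 4,
// a user-requested fixed VF of 8 is clamped to 4 below, whereas an unsafe
// scalable request such as vscale x 8 is dropped entirely and the cost model
// is left to pick the VF.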
4937 if (!UserVF.isScalable()) { 4938 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4939 << " is unsafe, clamping to max safe VF=" 4940 << MaxSafeFixedVF << ".\n"); 4941 ORE->emit([&]() { 4942 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4943 TheLoop->getStartLoc(), 4944 TheLoop->getHeader()) 4945 << "User-specified vectorization factor " 4946 << ore::NV("UserVectorizationFactor", UserVF) 4947 << " is unsafe, clamping to maximum safe vectorization factor " 4948 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 4949 }); 4950 return MaxSafeFixedVF; 4951 } 4952 4953 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 4954 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4955 << " is ignored because scalable vectors are not " 4956 "available.\n"); 4957 ORE->emit([&]() { 4958 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4959 TheLoop->getStartLoc(), 4960 TheLoop->getHeader()) 4961 << "User-specified vectorization factor " 4962 << ore::NV("UserVectorizationFactor", UserVF) 4963 << " is ignored because the target does not support scalable " 4964 "vectors. The compiler will pick a more suitable value."; 4965 }); 4966 } else { 4967 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4968 << " is unsafe. Ignoring scalable UserVF.\n"); 4969 ORE->emit([&]() { 4970 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4971 TheLoop->getStartLoc(), 4972 TheLoop->getHeader()) 4973 << "User-specified vectorization factor " 4974 << ore::NV("UserVectorizationFactor", UserVF) 4975 << " is unsafe. Ignoring the hint to let the compiler pick a " 4976 "more suitable value."; 4977 }); 4978 } 4979 } 4980 4981 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4982 << " / " << WidestType << " bits.\n"); 4983 4984 FixedScalableVFPair Result(ElementCount::getFixed(1), 4985 ElementCount::getScalable(0)); 4986 if (auto MaxVF = 4987 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4988 MaxSafeFixedVF, FoldTailByMasking)) 4989 Result.FixedVF = MaxVF; 4990 4991 if (auto MaxVF = 4992 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4993 MaxSafeScalableVF, FoldTailByMasking)) 4994 if (MaxVF.isScalable()) { 4995 Result.ScalableVF = MaxVF; 4996 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 4997 << "\n"); 4998 } 4999 5000 return Result; 5001 } 5002 5003 FixedScalableVFPair 5004 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5005 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5006 // TODO: It may by useful to do since it's still likely to be dynamically 5007 // uniform if the target can skip. 5008 reportVectorizationFailure( 5009 "Not inserting runtime ptr check for divergent target", 5010 "runtime pointer checks needed. 
Not enabled for divergent target",
5011 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5012 return FixedScalableVFPair::getNone();
5013 }
5014
5015 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5016 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5017 if (TC == 1) {
5018 reportVectorizationFailure("Single iteration (non) loop",
5019 "loop trip count is one, irrelevant for vectorization",
5020 "SingleIterationLoop", ORE, TheLoop);
5021 return FixedScalableVFPair::getNone();
5022 }
5023
5024 switch (ScalarEpilogueStatus) {
5025 case CM_ScalarEpilogueAllowed:
5026 return computeFeasibleMaxVF(TC, UserVF, false);
5027 case CM_ScalarEpilogueNotAllowedUsePredicate:
5028 LLVM_FALLTHROUGH;
5029 case CM_ScalarEpilogueNotNeededUsePredicate:
5030 LLVM_DEBUG(
5031 dbgs() << "LV: vector predicate hint/switch found.\n"
5032 << "LV: Not allowing scalar epilogue, creating predicated "
5033 << "vector loop.\n");
5034 break;
5035 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5036 // fallthrough as a special case of OptForSize
5037 case CM_ScalarEpilogueNotAllowedOptSize:
5038 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5039 LLVM_DEBUG(
5040 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5041 else
5042 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5043 << "count.\n");
5044
5045 // Bail if runtime checks are required, which are not good when optimising
5046 // for size.
5047 if (runtimeChecksRequired())
5048 return FixedScalableVFPair::getNone();
5049
5050 break;
5051 }
5052
5053 // The only loops we can vectorize without a scalar epilogue are loops with
5054 // a bottom-test and a single exiting block. We'd have to handle the fact
5055 // that not every instruction executes on the last iteration. This will
5056 // require a lane mask which varies through the vector loop body. (TODO)
5057 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5058 // If there was a tail-folding hint/switch, but we can't fold the tail by
5059 // masking, fall back to a vectorization with a scalar epilogue.
5060 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5061 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5062 "scalar epilogue instead.\n");
5063 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5064 return computeFeasibleMaxVF(TC, UserVF, false);
5065 }
5066 return FixedScalableVFPair::getNone();
5067 }
5068
5069 // Now try the tail folding.
5070
5071 // Invalidate interleave groups that require an epilogue if we can't mask
5072 // the interleave-group.
5073 if (!useMaskedInterleavedAccesses(TTI)) {
5074 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5075 "No decisions should have been taken at this point");
5076 // Note: There is no need to invalidate any cost modeling decisions here, as
5077 // none were taken so far.
5078 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5079 }
5080
5081 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5082 // Avoid tail folding if the trip count is known to be a multiple of any VF
5083 // we chose.
5084 // FIXME: The condition below pessimises the case for fixed-width vectors,
5085 // when scalable VFs are also candidates for vectorization.
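// Worked example (assumed values): with a trip count of 64, MaxFixedVF = 8
// and UserIC = 2, the exit count 64 is divisible by 8 * 2 = 16, so no tail
// remains and tail folding is skipped in the check below.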
5086 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5087 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5088 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5089 "MaxFixedVF must be a power of 2"); 5090 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5091 : MaxFixedVF.getFixedValue(); 5092 ScalarEvolution *SE = PSE.getSE(); 5093 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5094 const SCEV *ExitCount = SE->getAddExpr( 5095 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5096 const SCEV *Rem = SE->getURemExpr( 5097 SE->applyLoopGuards(ExitCount, TheLoop), 5098 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5099 if (Rem->isZero()) { 5100 // Accept MaxFixedVF if we do not have a tail. 5101 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5102 return MaxFactors; 5103 } 5104 } 5105 5106 // For scalable vectors don't use tail folding for low trip counts or 5107 // optimizing for code size. We only permit this if the user has explicitly 5108 // requested it. 5109 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5110 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5111 MaxFactors.ScalableVF.isVector()) 5112 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5113 5114 // If we don't know the precise trip count, or if the trip count that we 5115 // found modulo the vectorization factor is not zero, try to fold the tail 5116 // by masking. 5117 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5118 if (Legal->prepareToFoldTailByMasking()) { 5119 FoldTailByMasking = true; 5120 return MaxFactors; 5121 } 5122 5123 // If there was a tail-folding hint/switch, but we can't fold the tail by 5124 // masking, fallback to a vectorization with a scalar epilogue. 5125 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5126 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5127 "scalar epilogue instead.\n"); 5128 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5129 return MaxFactors; 5130 } 5131 5132 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5133 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5134 return FixedScalableVFPair::getNone(); 5135 } 5136 5137 if (TC == 0) { 5138 reportVectorizationFailure( 5139 "Unable to calculate the loop count due to complex control flow", 5140 "unable to calculate the loop count due to complex control flow", 5141 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5142 return FixedScalableVFPair::getNone(); 5143 } 5144 5145 reportVectorizationFailure( 5146 "Cannot optimize for size and vectorize at the same time.", 5147 "cannot optimize for size and vectorize at the same time. " 5148 "Enable vectorization of this loop with '#pragma clang loop " 5149 "vectorize(enable)' when compiling with -Os/-Oz", 5150 "NoTailLoopWithOptForSize", ORE, TheLoop); 5151 return FixedScalableVFPair::getNone(); 5152 } 5153 5154 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5155 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5156 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5157 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5158 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5159 ComputeScalableMaxVF ? 
TargetTransformInfo::RGK_ScalableVector 5160 : TargetTransformInfo::RGK_FixedWidthVector); 5161 5162 // Convenience function to return the minimum of two ElementCounts. 5163 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5164 assert((LHS.isScalable() == RHS.isScalable()) && 5165 "Scalable flags must match"); 5166 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5167 }; 5168 5169 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5170 // Note that both WidestRegister and WidestType may not be a powers of 2. 5171 auto MaxVectorElementCount = ElementCount::get( 5172 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5173 ComputeScalableMaxVF); 5174 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5175 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5176 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5177 5178 if (!MaxVectorElementCount) { 5179 LLVM_DEBUG(dbgs() << "LV: The target has no " 5180 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5181 << " vector registers.\n"); 5182 return ElementCount::getFixed(1); 5183 } 5184 5185 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5186 if (ConstTripCount && 5187 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5188 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5189 // If loop trip count (TC) is known at compile time there is no point in 5190 // choosing VF greater than TC (as done in the loop below). Select maximum 5191 // power of two which doesn't exceed TC. 5192 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5193 // when the TC is less than or equal to the known number of lanes. 5194 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5195 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5196 "exceeding the constant trip count: " 5197 << ClampedConstTripCount << "\n"); 5198 return ElementCount::getFixed(ClampedConstTripCount); 5199 } 5200 5201 TargetTransformInfo::RegisterKind RegKind = 5202 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5203 : TargetTransformInfo::RGK_FixedWidthVector; 5204 ElementCount MaxVF = MaxVectorElementCount; 5205 if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 && 5206 TTI.shouldMaximizeVectorBandwidth(RegKind))) { 5207 auto MaxVectorElementCountMaxBW = ElementCount::get( 5208 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5209 ComputeScalableMaxVF); 5210 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5211 5212 // Collect all viable vectorization factors larger than the default MaxVF 5213 // (i.e. MaxVectorElementCount). 5214 SmallVector<ElementCount, 8> VFs; 5215 for (ElementCount VS = MaxVectorElementCount * 2; 5216 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5217 VFs.push_back(VS); 5218 5219 // For each VF calculate its register usage. 5220 auto RUs = calculateRegisterUsage(VFs); 5221 5222 // Select the largest VF which doesn't require more registers than existing 5223 // ones. 
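// Sketch with made-up numbers: if the target has 32 vector registers and the
// register-usage estimate says VF=16 keeps 40 vector values live at once but
// VF=8 keeps only 20, the loop below rejects 16 and settles on 8.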
5224 for (int i = RUs.size() - 1; i >= 0; --i) { 5225 bool Selected = true; 5226 for (auto &pair : RUs[i].MaxLocalUsers) { 5227 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5228 if (pair.second > TargetNumRegisters) 5229 Selected = false; 5230 } 5231 if (Selected) { 5232 MaxVF = VFs[i]; 5233 break; 5234 } 5235 } 5236 if (ElementCount MinVF = 5237 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5238 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5239 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5240 << ") with target's minimum: " << MinVF << '\n'); 5241 MaxVF = MinVF; 5242 } 5243 } 5244 5245 // Invalidate any widening decisions we might have made, in case the loop 5246 // requires prediction (decided later), but we have already made some 5247 // load/store widening decisions. 5248 invalidateCostModelingDecisions(); 5249 } 5250 return MaxVF; 5251 } 5252 5253 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5254 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5255 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5256 auto Min = Attr.getVScaleRangeMin(); 5257 auto Max = Attr.getVScaleRangeMax(); 5258 if (Max && Min == Max) 5259 return Max; 5260 } 5261 5262 return TTI.getVScaleForTuning(); 5263 } 5264 5265 bool LoopVectorizationCostModel::isMoreProfitable( 5266 const VectorizationFactor &A, const VectorizationFactor &B) const { 5267 InstructionCost CostA = A.Cost; 5268 InstructionCost CostB = B.Cost; 5269 5270 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5271 5272 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5273 MaxTripCount) { 5274 // If we are folding the tail and the trip count is a known (possibly small) 5275 // constant, the trip count will be rounded up to an integer number of 5276 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5277 // which we compare directly. When not folding the tail, the total cost will 5278 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5279 // approximated with the per-lane cost below instead of using the tripcount 5280 // as here. 5281 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5282 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5283 return RTCostA < RTCostB; 5284 } 5285 5286 // Improve estimate for the vector width if it is scalable. 5287 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5288 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5289 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5290 if (A.Width.isScalable()) 5291 EstimatedWidthA *= VScale.getValue(); 5292 if (B.Width.isScalable()) 5293 EstimatedWidthB *= VScale.getValue(); 5294 } 5295 5296 // Assume vscale may be larger than 1 (or the value being tuned for), 5297 // so that scalable vectorization is slightly favorable over fixed-width 5298 // vectorization. 
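// Worked example (numbers are only illustrative): comparing A = {vscale x 4,
// cost 6} against B = {fixed 8, cost 10} with a tuning vscale of 2 gives
// EstimatedWidthA = 8, and the check below compares 6 * 8 <= 10 * 8; the use
// of "<=" means a scalable VF also wins a tie against an equally cheap fixed
// VF.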
5299 if (A.Width.isScalable() && !B.Width.isScalable()) 5300 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5301 5302 // To avoid the need for FP division: 5303 // (CostA / A.Width) < (CostB / B.Width) 5304 // <=> (CostA * B.Width) < (CostB * A.Width) 5305 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5306 } 5307 5308 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5309 const ElementCountSet &VFCandidates) { 5310 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5311 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5312 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5313 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5314 "Expected Scalar VF to be a candidate"); 5315 5316 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5317 VectorizationFactor ChosenFactor = ScalarCost; 5318 5319 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5320 if (ForceVectorization && VFCandidates.size() > 1) { 5321 // Ignore scalar width, because the user explicitly wants vectorization. 5322 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5323 // evaluation. 5324 ChosenFactor.Cost = InstructionCost::getMax(); 5325 } 5326 5327 SmallVector<InstructionVFPair> InvalidCosts; 5328 for (const auto &i : VFCandidates) { 5329 // The cost for scalar VF=1 is already calculated, so ignore it. 5330 if (i.isScalar()) 5331 continue; 5332 5333 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5334 VectorizationFactor Candidate(i, C.first); 5335 5336 #ifndef NDEBUG 5337 unsigned AssumedMinimumVscale = 1; 5338 if (Optional<unsigned> VScale = getVScaleForTuning()) 5339 AssumedMinimumVscale = VScale.getValue(); 5340 unsigned Width = 5341 Candidate.Width.isScalable() 5342 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5343 : Candidate.Width.getFixedValue(); 5344 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5345 << " costs: " << (Candidate.Cost / Width)); 5346 if (i.isScalable()) 5347 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5348 << AssumedMinimumVscale << ")"); 5349 LLVM_DEBUG(dbgs() << ".\n"); 5350 #endif 5351 5352 if (!C.second && !ForceVectorization) { 5353 LLVM_DEBUG( 5354 dbgs() << "LV: Not considering vector loop of width " << i 5355 << " because it will not generate any vector instructions.\n"); 5356 continue; 5357 } 5358 5359 // If profitable add it to ProfitableVF list. 5360 if (isMoreProfitable(Candidate, ScalarCost)) 5361 ProfitableVFs.push_back(Candidate); 5362 5363 if (isMoreProfitable(Candidate, ChosenFactor)) 5364 ChosenFactor = Candidate; 5365 } 5366 5367 // Emit a report of VFs with invalid costs in the loop. 5368 if (!InvalidCosts.empty()) { 5369 // Group the remarks per instruction, keeping the instruction order from 5370 // InvalidCosts. 5371 std::map<Instruction *, unsigned> Numbering; 5372 unsigned I = 0; 5373 for (auto &Pair : InvalidCosts) 5374 if (!Numbering.count(Pair.first)) 5375 Numbering[Pair.first] = I++; 5376 5377 // Sort the list, first on instruction(number) then on VF. 
5378 llvm::sort(InvalidCosts, 5379 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5380 if (Numbering[A.first] != Numbering[B.first]) 5381 return Numbering[A.first] < Numbering[B.first]; 5382 ElementCountComparator ECC; 5383 return ECC(A.second, B.second); 5384 }); 5385 5386 // For a list of ordered instruction-vf pairs: 5387 // [(load, vf1), (load, vf2), (store, vf1)] 5388 // Group the instructions together to emit separate remarks for: 5389 // load (vf1, vf2) 5390 // store (vf1) 5391 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5392 auto Subset = ArrayRef<InstructionVFPair>(); 5393 do { 5394 if (Subset.empty()) 5395 Subset = Tail.take_front(1); 5396 5397 Instruction *I = Subset.front().first; 5398 5399 // If the next instruction is different, or if there are no other pairs, 5400 // emit a remark for the collated subset. e.g. 5401 // [(load, vf1), (load, vf2))] 5402 // to emit: 5403 // remark: invalid costs for 'load' at VF=(vf, vf2) 5404 if (Subset == Tail || Tail[Subset.size()].first != I) { 5405 std::string OutString; 5406 raw_string_ostream OS(OutString); 5407 assert(!Subset.empty() && "Unexpected empty range"); 5408 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5409 for (auto &Pair : Subset) 5410 OS << (Pair.second == Subset.front().second ? "" : ", ") 5411 << Pair.second; 5412 OS << "):"; 5413 if (auto *CI = dyn_cast<CallInst>(I)) 5414 OS << " call to " << CI->getCalledFunction()->getName(); 5415 else 5416 OS << " " << I->getOpcodeName(); 5417 OS.flush(); 5418 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5419 Tail = Tail.drop_front(Subset.size()); 5420 Subset = {}; 5421 } else 5422 // Grow the subset by one element 5423 Subset = Tail.take_front(Subset.size() + 1); 5424 } while (!Tail.empty()); 5425 } 5426 5427 if (!EnableCondStoresVectorization && NumPredStores) { 5428 reportVectorizationFailure("There are conditional stores.", 5429 "store that is conditionally executed prevents vectorization", 5430 "ConditionalStore", ORE, TheLoop); 5431 ChosenFactor = ScalarCost; 5432 } 5433 5434 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5435 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5436 << "LV: Vectorization seems to be not beneficial, " 5437 << "but was forced by a user.\n"); 5438 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5439 return ChosenFactor; 5440 } 5441 5442 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5443 const Loop &L, ElementCount VF) const { 5444 // Cross iteration phis such as reductions need special handling and are 5445 // currently unsupported. 5446 if (any_of(L.getHeader()->phis(), 5447 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5448 return false; 5449 5450 // Phis with uses outside of the loop require special handling and are 5451 // currently unsupported. 5452 for (auto &Entry : Legal->getInductionVars()) { 5453 // Look for uses of the value of the induction at the last iteration. 5454 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5455 for (User *U : PostInc->users()) 5456 if (!L.contains(cast<Instruction>(U))) 5457 return false; 5458 // Look for uses of penultimate value of the induction. 5459 for (User *U : Entry.first->users()) 5460 if (!L.contains(cast<Instruction>(U))) 5461 return false; 5462 } 5463 5464 // Induction variables that are widened require special handling that is 5465 // currently not supported. 
5466 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5467 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5468 this->isProfitableToScalarize(Entry.first, VF)); 5469 })) 5470 return false; 5471 5472 // Epilogue vectorization code has not been audited to ensure it handles 5473 // non-latch exits properly. It may be fine, but it needs to be audited and 5474 // tested. 5475 if (L.getExitingBlock() != L.getLoopLatch()) 5476 return false; 5477 5478 return true; 5479 } 5480 5481 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5482 const ElementCount VF) const { 5483 // FIXME: We need a much better cost-model to take different parameters such 5484 // as register pressure, code size increase and cost of extra branches into 5485 // account. For now we apply a very crude heuristic and only consider loops 5486 // with vectorization factors larger than a certain value. 5487 // We also consider epilogue vectorization unprofitable for targets that don't 5488 // consider interleaving beneficial (e.g. MVE). 5489 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5490 return false; 5491 // FIXME: We should consider changing the threshold for scalable 5492 // vectors to take VScaleForTuning into account. 5493 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF) 5494 return true; 5495 return false; 5496 } 5497 5498 VectorizationFactor 5499 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5500 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5501 VectorizationFactor Result = VectorizationFactor::Disabled(); 5502 if (!EnableEpilogueVectorization) { 5503 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5504 return Result; 5505 } 5506 5507 if (!isScalarEpilogueAllowed()) { 5508 LLVM_DEBUG( 5509 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5510 "allowed.\n";); 5511 return Result; 5512 } 5513 5514 // Not really a cost consideration, but check for unsupported cases here to 5515 // simplify the logic. 5516 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5517 LLVM_DEBUG( 5518 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5519 "not a supported candidate.\n";); 5520 return Result; 5521 } 5522 5523 if (EpilogueVectorizationForceVF > 1) { 5524 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5525 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5526 if (LVP.hasPlanWithVF(ForcedEC)) 5527 return {ForcedEC, 0}; 5528 else { 5529 LLVM_DEBUG( 5530 dbgs() 5531 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5532 return Result; 5533 } 5534 } 5535 5536 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5537 TheLoop->getHeader()->getParent()->hasMinSize()) { 5538 LLVM_DEBUG( 5539 dbgs() 5540 << "LEV: Epilogue vectorization skipped due to opt for size.\n";) ; 5541 return Result; 5542 } 5543 5544 if (!isEpilogueVectorizationProfitable(MainLoopVF)) { 5545 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 5546 "this loop\n"); 5547 return Result; 5548 } 5549 5550 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know 5551 // the main loop handles 8 lanes per iteration. We could still benefit from 5552 // vectorizing the epilogue loop with VF=4.
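// In that illustrative case, the code below computes
// EstimatedRuntimeVF = fixed(2) * 4 = fixed(8), so any profitable fixed-width
// candidate known to be narrower than 8 lanes can still be chosen for the
// epilogue.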
5553 ElementCount EstimatedRuntimeVF = MainLoopVF; 5554 if (MainLoopVF.isScalable()) { 5555 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5556 if (Optional<unsigned> VScale = getVScaleForTuning()) 5557 EstimatedRuntimeVF *= VScale.getValue(); 5558 } 5559 5560 for (auto &NextVF : ProfitableVFs) 5561 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5562 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5563 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5564 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5565 LVP.hasPlanWithVF(NextVF.Width)) 5566 Result = NextVF; 5567 5568 if (Result != VectorizationFactor::Disabled()) 5569 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5570 << Result.Width << "\n";); 5571 return Result; 5572 } 5573 5574 std::pair<unsigned, unsigned> 5575 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5576 unsigned MinWidth = -1U; 5577 unsigned MaxWidth = 8; 5578 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5579 // For in-loop reductions, no element types are added to ElementTypesInLoop 5580 // if there are no loads/stores in the loop. In this case, check through the 5581 // reduction variables to determine the maximum width. 5582 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5583 // Reset MaxWidth so that we can find the smallest type used by recurrences 5584 // in the loop. 5585 MaxWidth = -1U; 5586 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5587 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5588 // When finding the min width used by the recurrence we need to account 5589 // for casts on the input operands of the recurrence. 5590 MaxWidth = std::min<unsigned>( 5591 MaxWidth, std::min<unsigned>( 5592 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5593 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5594 } 5595 } else { 5596 for (Type *T : ElementTypesInLoop) { 5597 MinWidth = std::min<unsigned>( 5598 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5599 MaxWidth = std::max<unsigned>( 5600 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5601 } 5602 } 5603 return {MinWidth, MaxWidth}; 5604 } 5605 5606 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5607 ElementTypesInLoop.clear(); 5608 // For each block. 5609 for (BasicBlock *BB : TheLoop->blocks()) { 5610 // For each instruction in the loop. 5611 for (Instruction &I : BB->instructionsWithoutDebug()) { 5612 Type *T = I.getType(); 5613 5614 // Skip ignored values. 5615 if (ValuesToIgnore.count(&I)) 5616 continue; 5617 5618 // Only examine Loads, Stores and PHINodes. 5619 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5620 continue; 5621 5622 // Examine PHI nodes that are reduction variables. Update the type to 5623 // account for the recurrence type. 5624 if (auto *PN = dyn_cast<PHINode>(&I)) { 5625 if (!Legal->isReductionVariable(PN)) 5626 continue; 5627 const RecurrenceDescriptor &RdxDesc = 5628 Legal->getReductionVars().find(PN)->second; 5629 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5630 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5631 RdxDesc.getRecurrenceType(), 5632 TargetTransformInfo::ReductionFlags())) 5633 continue; 5634 T = RdxDesc.getRecurrenceType(); 5635 } 5636 5637 // Examine the stored values. 
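// (For a store, the element type of interest is that of the stored value,
// not the type of the pointer operand.)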
5638 if (auto *ST = dyn_cast<StoreInst>(&I)) 5639 T = ST->getValueOperand()->getType(); 5640 5641 assert(T->isSized() && 5642 "Expected the load/store/recurrence type to be sized"); 5643 5644 ElementTypesInLoop.insert(T); 5645 } 5646 } 5647 } 5648 5649 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5650 unsigned LoopCost) { 5651 // -- The interleave heuristics -- 5652 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5653 // There are many micro-architectural considerations that we can't predict 5654 // at this level. For example, frontend pressure (on decode or fetch) due to 5655 // code size, or the number and capabilities of the execution ports. 5656 // 5657 // We use the following heuristics to select the interleave count: 5658 // 1. If the code has reductions, then we interleave to break the cross 5659 // iteration dependency. 5660 // 2. If the loop is really small, then we interleave to reduce the loop 5661 // overhead. 5662 // 3. We don't interleave if we think that we will spill registers to memory 5663 // due to the increased register pressure. 5664 5665 if (!isScalarEpilogueAllowed()) 5666 return 1; 5667 5668 // We used the distance for the interleave count. 5669 if (Legal->getMaxSafeDepDistBytes() != -1U) 5670 return 1; 5671 5672 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5673 const bool HasReductions = !Legal->getReductionVars().empty(); 5674 // Do not interleave loops with a relatively small known or estimated trip 5675 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 5676 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5677 // because with the above conditions interleaving can expose ILP and break 5678 // cross iteration dependences for reductions. 5679 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5680 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5681 return 1; 5682 5683 // If we did not calculate the cost for VF (because the user selected the VF) 5684 // then we calculate the cost of VF here. 5685 if (LoopCost == 0) { 5686 InstructionCost C = expectedCost(VF).first; 5687 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5688 LoopCost = *C.getValue(); 5689 5690 // Loop body is free and there is no need for interleaving. 5691 if (LoopCost == 0) 5692 return 1; 5693 } 5694 5695 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5696 // We divide by these constants so assume that we have at least one 5697 // instruction that uses at least one register. 5698 for (auto& pair : R.MaxLocalUsers) { 5699 pair.second = std::max(pair.second, 1U); 5700 } 5701 5702 // We calculate the interleave count using the following formula. 5703 // Subtract the number of loop invariants from the number of available 5704 // registers. These registers are used by all of the interleaved instances. 5705 // Next, divide the remaining registers by the number of registers that is 5706 // required by the loop, in order to estimate how many parallel instances 5707 // fit without causing spills. All of this is rounded down if necessary to be 5708 // a power of two. We want power of two interleave count to simplify any 5709 // addressing operations or alignment considerations. 
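// As a purely illustrative example: with 32 available registers in a class,
// 2 of them occupied by loop-invariant values and at most 5 values of that
// class live at once inside the loop, the basic formula below yields
// PowerOf2Floor((32 - 2) / 5) = PowerOf2Floor(6) = 4 interleaved instances
// (the induction-variable heuristic additionally subtracts one register and
// one local user).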
5710 // We also want power of two interleave counts to ensure that the induction 5711 // variable of the vector loop wraps to zero, when tail is folded by masking; 5712 // this currently happens when OptForSize, in which case IC is set to 1 above. 5713 unsigned IC = UINT_MAX; 5714 5715 for (auto& pair : R.MaxLocalUsers) { 5716 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5717 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5718 << " registers of " 5719 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5720 if (VF.isScalar()) { 5721 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5722 TargetNumRegisters = ForceTargetNumScalarRegs; 5723 } else { 5724 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5725 TargetNumRegisters = ForceTargetNumVectorRegs; 5726 } 5727 unsigned MaxLocalUsers = pair.second; 5728 unsigned LoopInvariantRegs = 0; 5729 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5730 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5731 5732 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5733 // Don't count the induction variable as interleaved. 5734 if (EnableIndVarRegisterHeur) { 5735 TmpIC = 5736 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5737 std::max(1U, (MaxLocalUsers - 1))); 5738 } 5739 5740 IC = std::min(IC, TmpIC); 5741 } 5742 5743 // Clamp the interleave ranges to reasonable counts. 5744 unsigned MaxInterleaveCount = 5745 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5746 5747 // Check if the user has overridden the max. 5748 if (VF.isScalar()) { 5749 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5750 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5751 } else { 5752 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5753 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5754 } 5755 5756 // If trip count is known or estimated compile time constant, limit the 5757 // interleave count to be less than the trip count divided by VF, provided it 5758 // is at least 1. 5759 // 5760 // For scalable vectors we can't know if interleaving is beneficial. It may 5761 // not be beneficial for small loops if none of the lanes in the second vector 5762 // iterations is enabled. However, for larger loops, there is likely to be a 5763 // similar benefit as for fixed-width vectors. For now, we choose to leave 5764 // the InterleaveCount as if vscale is '1', although if some information about 5765 // the vector is known (e.g. min vector size), we can make a better decision. 5766 if (BestKnownTC) { 5767 MaxInterleaveCount = 5768 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5769 // Make sure MaxInterleaveCount is greater than 0. 5770 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5771 } 5772 5773 assert(MaxInterleaveCount > 0 && 5774 "Maximum interleave count must be greater than 0"); 5775 5776 // Clamp the calculated IC to be between the 1 and the max interleave count 5777 // that the target and trip count allows. 5778 if (IC > MaxInterleaveCount) 5779 IC = MaxInterleaveCount; 5780 else 5781 // Make sure IC is greater than 0. 5782 IC = std::max(1u, IC); 5783 5784 assert(IC > 0 && "Interleave count must be greater than 0."); 5785 5786 // Interleave if we vectorized this loop and there is a reduction that could 5787 // benefit from interleaving. 
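// For instance, a vectorized sum reduction interleaved by IC keeps IC
// independent partial accumulators in flight, hiding the latency of the
// loop-carried add; the partial sums are combined only once after the loop.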
5788 if (VF.isVector() && HasReductions) { 5789 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5790 return IC; 5791 } 5792 5793 // For any scalar loop that either requires runtime checks or predication we 5794 // are better off leaving this to the unroller. Note that if we've already 5795 // vectorized the loop we will have done the runtime check and so interleaving 5796 // won't require further checks. 5797 bool ScalarInterleavingRequiresPredication = 5798 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5799 return Legal->blockNeedsPredication(BB); 5800 })); 5801 bool ScalarInterleavingRequiresRuntimePointerCheck = 5802 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5803 5804 // We want to interleave small loops in order to reduce the loop overhead and 5805 // potentially expose ILP opportunities. 5806 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5807 << "LV: IC is " << IC << '\n' 5808 << "LV: VF is " << VF << '\n'); 5809 const bool AggressivelyInterleaveReductions = 5810 TTI.enableAggressiveInterleaving(HasReductions); 5811 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5812 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5813 // We assume that the cost overhead is 1 and we use the cost model 5814 // to estimate the cost of the loop and interleave until the cost of the 5815 // loop overhead is about 5% of the cost of the loop. 5816 unsigned SmallIC = 5817 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5818 5819 // Interleave until store/load ports (estimated by max interleave count) are 5820 // saturated. 5821 unsigned NumStores = Legal->getNumStores(); 5822 unsigned NumLoads = Legal->getNumLoads(); 5823 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5824 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5825 5826 // There is little point in interleaving for reductions containing selects 5827 // and compares when VF=1 since it may just create more overhead than it's 5828 // worth for loops with small trip counts. This is because we still have to 5829 // do the final reduction after the loop. 5830 bool HasSelectCmpReductions = 5831 HasReductions && 5832 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5833 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5834 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5835 RdxDesc.getRecurrenceKind()); 5836 }); 5837 if (HasSelectCmpReductions) { 5838 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5839 return 1; 5840 } 5841 5842 // If we have a scalar reduction (vector reductions are already dealt with 5843 // by this point), we can increase the critical path length if the loop 5844 // we're interleaving is inside another loop. For tree-wise reductions 5845 // set the limit to 2, and for ordered reductions it's best to disable 5846 // interleaving entirely. 
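// (Illustrative case: a scalar fadd reduction in an inner loop interleaved by
// 4 leaves 4 partial results that must be merged after the inner loop on
// every outer-loop iteration, lengthening the critical path; hence the clamp
// below.)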
5847 if (HasReductions && TheLoop->getLoopDepth() > 1) { 5848 bool HasOrderedReductions = 5849 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5850 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5851 return RdxDesc.isOrdered(); 5852 }); 5853 if (HasOrderedReductions) { 5854 LLVM_DEBUG( 5855 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 5856 return 1; 5857 } 5858 5859 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5860 SmallIC = std::min(SmallIC, F); 5861 StoresIC = std::min(StoresIC, F); 5862 LoadsIC = std::min(LoadsIC, F); 5863 } 5864 5865 if (EnableLoadStoreRuntimeInterleave && 5866 std::max(StoresIC, LoadsIC) > SmallIC) { 5867 LLVM_DEBUG( 5868 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5869 return std::max(StoresIC, LoadsIC); 5870 } 5871 5872 // If there are scalar reductions and TTI has enabled aggressive 5873 // interleaving for reductions, we will interleave to expose ILP. 5874 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 5875 AggressivelyInterleaveReductions) { 5876 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5877 // Interleave no less than SmallIC but not as aggressive as the normal IC 5878 // to satisfy the rare situation when resources are too limited. 5879 return std::max(IC / 2, SmallIC); 5880 } else { 5881 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5882 return SmallIC; 5883 } 5884 } 5885 5886 // Interleave if this is a large loop (small loops are already dealt with by 5887 // this point) that could benefit from interleaving. 5888 if (AggressivelyInterleaveReductions) { 5889 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5890 return IC; 5891 } 5892 5893 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5894 return 1; 5895 } 5896 5897 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5898 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 5899 // This function calculates the register usage by measuring the highest number 5900 // of values that are alive at a single location. Obviously, this is a very 5901 // rough estimation. We scan the loop in a topological order in order and 5902 // assign a number to each instruction. We use RPO to ensure that defs are 5903 // met before their users. We assume that each instruction that has in-loop 5904 // users starts an interval. We record every time that an in-loop value is 5905 // used, so we have a list of the first and last occurrences of each 5906 // instruction. Next, we transpose this data structure into a multi map that 5907 // holds the list of intervals that *end* at a specific location. This multi 5908 // map allows us to perform a linear search. We scan the instructions linearly 5909 // and record each time that a new interval starts, by placing it in a set. 5910 // If we find this value in the multi-map then we remove it from the set. 5911 // The max register usage is the maximum size of the set. 5912 // We also search for instructions that are defined outside the loop, but are 5913 // used inside the loop. We need this number separately from the max-interval 5914 // usage number because when we unroll, loop-invariant values do not take 5915 // more register. 5916 LoopBlocksDFS DFS(TheLoop); 5917 DFS.perform(LI); 5918 5919 RegisterUsage RU; 5920 5921 // Each 'key' in the map opens a new interval. The values 5922 // of the map are the index of the 'last seen' usage of the 5923 // instruction that is the key. 
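// Illustrative example with hypothetical values: if %a is defined at index 0
// and last used at index 3, while %b is defined at index 1 and last used at
// index 2, the two intervals overlap around indices 1-2, so at most two
// values of that register class are live at once and the usage estimate is 2.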
5924 using IntervalMap = DenseMap<Instruction *, unsigned>; 5925 5926 // Maps instruction to its index. 5927 SmallVector<Instruction *, 64> IdxToInstr; 5928 // Marks the end of each interval. 5929 IntervalMap EndPoint; 5930 // Saves the list of instruction indices that are used in the loop. 5931 SmallPtrSet<Instruction *, 8> Ends; 5932 // Saves the list of values that are used in the loop but are 5933 // defined outside the loop, such as arguments and constants. 5934 SmallPtrSet<Value *, 8> LoopInvariants; 5935 5936 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5937 for (Instruction &I : BB->instructionsWithoutDebug()) { 5938 IdxToInstr.push_back(&I); 5939 5940 // Save the end location of each USE. 5941 for (Value *U : I.operands()) { 5942 auto *Instr = dyn_cast<Instruction>(U); 5943 5944 // Ignore non-instruction values such as arguments, constants, etc. 5945 if (!Instr) 5946 continue; 5947 5948 // If this instruction is outside the loop then record it and continue. 5949 if (!TheLoop->contains(Instr)) { 5950 LoopInvariants.insert(Instr); 5951 continue; 5952 } 5953 5954 // Overwrite previous end points. 5955 EndPoint[Instr] = IdxToInstr.size(); 5956 Ends.insert(Instr); 5957 } 5958 } 5959 } 5960 5961 // Saves the list of intervals that end with the index in 'key'. 5962 using InstrList = SmallVector<Instruction *, 2>; 5963 DenseMap<unsigned, InstrList> TransposeEnds; 5964 5965 // Transpose the EndPoints to a list of values that end at each index. 5966 for (auto &Interval : EndPoint) 5967 TransposeEnds[Interval.second].push_back(Interval.first); 5968 5969 SmallPtrSet<Instruction *, 8> OpenIntervals; 5970 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5971 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5972 5973 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5974 5975 // A lambda that gets the register usage for the given type and VF. 5976 const auto &TTICapture = TTI; 5977 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 5978 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5979 return 0; 5980 InstructionCost::CostType RegUsage = 5981 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 5982 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 5983 "Nonsensical values for register usage."); 5984 return RegUsage; 5985 }; 5986 5987 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5988 Instruction *I = IdxToInstr[i]; 5989 5990 // Remove all of the instructions that end at this location. 5991 InstrList &List = TransposeEnds[i]; 5992 for (Instruction *ToRemove : List) 5993 OpenIntervals.erase(ToRemove); 5994 5995 // Ignore instructions that are never used within the loop. 5996 if (!Ends.count(I)) 5997 continue; 5998 5999 // Skip ignored values. 6000 if (ValuesToIgnore.count(I)) 6001 continue; 6002 6003 // For each VF find the maximum usage of registers. 6004 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6005 // Count the number of live intervals. 6006 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6007 6008 if (VFs[j].isScalar()) { 6009 for (auto Inst : OpenIntervals) { 6010 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6011 if (RegUsage.find(ClassID) == RegUsage.end()) 6012 RegUsage[ClassID] = 1; 6013 else 6014 RegUsage[ClassID] += 1; 6015 } 6016 } else { 6017 collectUniformsAndScalars(VFs[j]); 6018 for (auto Inst : OpenIntervals) { 6019 // Skip ignored values for VF > 1. 
6020 if (VecValuesToIgnore.count(Inst)) 6021 continue; 6022 if (isScalarAfterVectorization(Inst, VFs[j])) { 6023 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6024 if (RegUsage.find(ClassID) == RegUsage.end()) 6025 RegUsage[ClassID] = 1; 6026 else 6027 RegUsage[ClassID] += 1; 6028 } else { 6029 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6030 if (RegUsage.find(ClassID) == RegUsage.end()) 6031 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6032 else 6033 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6034 } 6035 } 6036 } 6037 6038 for (auto& pair : RegUsage) { 6039 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6040 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6041 else 6042 MaxUsages[j][pair.first] = pair.second; 6043 } 6044 } 6045 6046 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6047 << OpenIntervals.size() << '\n'); 6048 6049 // Add the current instruction to the list of open intervals. 6050 OpenIntervals.insert(I); 6051 } 6052 6053 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6054 SmallMapVector<unsigned, unsigned, 4> Invariant; 6055 6056 for (auto Inst : LoopInvariants) { 6057 unsigned Usage = 6058 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6059 unsigned ClassID = 6060 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6061 if (Invariant.find(ClassID) == Invariant.end()) 6062 Invariant[ClassID] = Usage; 6063 else 6064 Invariant[ClassID] += Usage; 6065 } 6066 6067 LLVM_DEBUG({ 6068 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6069 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6070 << " item\n"; 6071 for (const auto &pair : MaxUsages[i]) { 6072 dbgs() << "LV(REG): RegisterClass: " 6073 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6074 << " registers\n"; 6075 } 6076 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6077 << " item\n"; 6078 for (const auto &pair : Invariant) { 6079 dbgs() << "LV(REG): RegisterClass: " 6080 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6081 << " registers\n"; 6082 } 6083 }); 6084 6085 RU.LoopInvariantRegs = Invariant; 6086 RU.MaxLocalUsers = MaxUsages[i]; 6087 RUs[i] = RU; 6088 } 6089 6090 return RUs; 6091 } 6092 6093 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6094 ElementCount VF) { 6095 // TODO: Cost model for emulated masked load/store is completely 6096 // broken. This hack guides the cost model to use an artificially 6097 // high enough value to practically disable vectorization with such 6098 // operations, except where previously deployed legality hack allowed 6099 // using very low cost values. This is to avoid regressions coming simply 6100 // from moving "masked load/store" check from legality to cost model. 6101 // Masked Load/Gather emulation was previously never allowed. 6102 // Limited number of Masked Store/Scatter emulation was allowed. 6103 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6104 return isa<LoadInst>(I) || 6105 (isa<StoreInst>(I) && 6106 NumPredStores > NumberOfStoresToPredicate); 6107 } 6108 6109 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6110 // If we aren't vectorizing the loop, or if we've already collected the 6111 // instructions to scalarize, there's nothing to do. 
6111 // instructions to scalarize, there's nothing to do. Collection may already 6112 // have occurred if we have a user-selected VF and are now computing the 6113 // expected cost for interleaving. 6114 if (VF.isScalar() || VF.isZero() || 6115 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6116 return; 6117 6118 // Initialize a mapping for VF in InstsToScalarize. If we find that it's 6119 // not profitable to scalarize any instructions, the presence of VF in the 6120 // map will indicate that we've analyzed it already. 6121 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6122 6123 // Find all the instructions that are scalar with predication in the loop and 6124 // determine if it would be better to not if-convert the blocks they are in. 6125 // If so, we also record the instructions to scalarize. 6126 for (BasicBlock *BB : TheLoop->blocks()) { 6127 if (!blockNeedsPredicationForAnyReason(BB)) 6128 continue; 6129 for (Instruction &I : *BB) 6130 if (isScalarWithPredication(&I, VF)) { 6131 ScalarCostsTy ScalarCosts; 6132 // Do not apply discount if scalable, because that would lead to 6133 // invalid scalarization costs. 6134 // Do not apply discount logic if hacked cost is needed 6135 // for emulated masked memrefs. 6136 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) && 6137 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6138 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6139 // Remember that BB will remain after vectorization. 6140 PredicatedBBsAfterVectorization.insert(BB); 6141 } 6142 } 6143 } 6144 6145 int LoopVectorizationCostModel::computePredInstDiscount( 6146 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6147 assert(!isUniformAfterVectorization(PredInst, VF) && 6148 "Instruction marked uniform-after-vectorization will be predicated"); 6149 6150 // Initialize the discount to zero, meaning that the scalar version and the 6151 // vector version cost the same. 6152 InstructionCost Discount = 0; 6153 6154 // Holds instructions to analyze. The instructions we visit are mapped in 6155 // ScalarCosts. Those instructions are the ones that would be scalarized if 6156 // we find that the scalar version costs less. 6157 SmallVector<Instruction *, 8> Worklist; 6158 6159 // Returns true if the given instruction can be scalarized. 6160 auto canBeScalarized = [&](Instruction *I) -> bool { 6161 // We only attempt to scalarize instructions forming a single-use chain 6162 // from the original predicated block that would otherwise be vectorized. 6163 // Although not strictly necessary, we give up on instructions we know will 6164 // already be scalar to avoid traversing chains that are unlikely to be 6165 // beneficial. 6166 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6167 isScalarAfterVectorization(I, VF)) 6168 return false; 6169 6170 // If the instruction is scalar with predication, it will be analyzed 6171 // separately. We ignore it within the context of PredInst. 6172 if (isScalarWithPredication(I, VF)) 6173 return false; 6174 6175 // If any of the instruction's operands are uniform after vectorization, 6176 // the instruction cannot be scalarized. This prevents, for example, a 6177 // masked load from being scalarized. 6178 // 6179 // We assume we will only emit a value for lane zero of an instruction 6180 // marked uniform after vectorization, rather than VF identical values. 6181 // Thus, if we scalarize an instruction that uses a uniform, we would 6182 // create uses of values corresponding to the lanes we aren't emitting code 6183 // for.
This behavior can be changed by allowing getScalarValue to clone 6184 // the lane zero values for uniforms rather than asserting. 6185 for (Use &U : I->operands()) 6186 if (auto *J = dyn_cast<Instruction>(U.get())) 6187 if (isUniformAfterVectorization(J, VF)) 6188 return false; 6189 6190 // Otherwise, we can scalarize the instruction. 6191 return true; 6192 }; 6193 6194 // Compute the expected cost discount from scalarizing the entire expression 6195 // feeding the predicated instruction. We currently only consider expressions 6196 // that are single-use instruction chains. 6197 Worklist.push_back(PredInst); 6198 while (!Worklist.empty()) { 6199 Instruction *I = Worklist.pop_back_val(); 6200 6201 // If we've already analyzed the instruction, there's nothing to do. 6202 if (ScalarCosts.find(I) != ScalarCosts.end()) 6203 continue; 6204 6205 // Compute the cost of the vector instruction. Note that this cost already 6206 // includes the scalarization overhead of the predicated instruction. 6207 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6208 6209 // Compute the cost of the scalarized instruction. This cost is the cost of 6210 // the instruction as if it wasn't if-converted and instead remained in the 6211 // predicated block. We will scale this cost by block probability after 6212 // computing the scalarization overhead. 6213 InstructionCost ScalarCost = 6214 VF.getFixedValue() * 6215 getInstructionCost(I, ElementCount::getFixed(1)).first; 6216 6217 // Compute the scalarization overhead of needed insertelement instructions 6218 // and phi nodes. 6219 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6220 ScalarCost += TTI.getScalarizationOverhead( 6221 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6222 APInt::getAllOnes(VF.getFixedValue()), true, false); 6223 ScalarCost += 6224 VF.getFixedValue() * 6225 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6226 } 6227 6228 // Compute the scalarization overhead of needed extractelement 6229 // instructions. For each of the instruction's operands, if the operand can 6230 // be scalarized, add it to the worklist; otherwise, account for the 6231 // overhead. 6232 for (Use &U : I->operands()) 6233 if (auto *J = dyn_cast<Instruction>(U.get())) { 6234 assert(VectorType::isValidElementType(J->getType()) && 6235 "Instruction has non-scalar type"); 6236 if (canBeScalarized(J)) 6237 Worklist.push_back(J); 6238 else if (needsExtract(J, VF)) { 6239 ScalarCost += TTI.getScalarizationOverhead( 6240 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6241 APInt::getAllOnes(VF.getFixedValue()), false, true); 6242 } 6243 } 6244 6245 // Scale the total scalar cost by block probability. 6246 ScalarCost /= getReciprocalPredBlockProb(); 6247 6248 // Compute the discount. A non-negative discount means the vector version 6249 // of the instruction costs more, and scalarizing would be beneficial. 6250 Discount += VectorCost - ScalarCost; 6251 ScalarCosts[I] = ScalarCost; 6252 } 6253 6254 return *Discount.getValue(); 6255 } 6256 6257 LoopVectorizationCostModel::VectorizationCostTy 6258 LoopVectorizationCostModel::expectedCost( 6259 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6260 VectorizationCostTy Cost; 6261 6262 // For each block. 6263 for (BasicBlock *BB : TheLoop->blocks()) { 6264 VectorizationCostTy BlockCost; 6265 6266 // For each instruction in the old loop. 6267 for (Instruction &I : BB->instructionsWithoutDebug()) { 6268 // Skip ignored values. 
6269 if (ValuesToIgnore.count(&I) || 6270 (VF.isVector() && VecValuesToIgnore.count(&I))) 6271 continue; 6272 6273 VectorizationCostTy C = getInstructionCost(&I, VF); 6274 6275 // Check if we should override the cost. 6276 if (C.first.isValid() && 6277 ForceTargetInstructionCost.getNumOccurrences() > 0) 6278 C.first = InstructionCost(ForceTargetInstructionCost); 6279 6280 // Keep a list of instructions with invalid costs. 6281 if (Invalid && !C.first.isValid()) 6282 Invalid->emplace_back(&I, VF); 6283 6284 BlockCost.first += C.first; 6285 BlockCost.second |= C.second; 6286 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6287 << " for VF " << VF << " For instruction: " << I 6288 << '\n'); 6289 } 6290 6291 // If we are vectorizing a predicated block, it will have been 6292 // if-converted. This means that the block's instructions (aside from 6293 // stores and instructions that may divide by zero) will now be 6294 // unconditionally executed. For the scalar case, we may not always execute 6295 // the predicated block, if it is an if-else block. Thus, scale the block's 6296 // cost by the probability of executing it. blockNeedsPredication from 6297 // Legal is used so as to not include all blocks in tail folded loops. 6298 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6299 BlockCost.first /= getReciprocalPredBlockProb(); 6300 6301 Cost.first += BlockCost.first; 6302 Cost.second |= BlockCost.second; 6303 } 6304 6305 return Cost; 6306 } 6307 6308 /// Gets Address Access SCEV after verifying that the access pattern 6309 /// is loop invariant except the induction variable dependence. 6310 /// 6311 /// This SCEV can be sent to the Target in order to estimate the address 6312 /// calculation cost. 6313 static const SCEV *getAddressAccessSCEV( 6314 Value *Ptr, 6315 LoopVectorizationLegality *Legal, 6316 PredicatedScalarEvolution &PSE, 6317 const Loop *TheLoop) { 6318 6319 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6320 if (!Gep) 6321 return nullptr; 6322 6323 // We are looking for a gep with all loop invariant indices except for one 6324 // which should be an induction variable. 6325 auto SE = PSE.getSE(); 6326 unsigned NumOperands = Gep->getNumOperands(); 6327 for (unsigned i = 1; i < NumOperands; ++i) { 6328 Value *Opd = Gep->getOperand(i); 6329 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6330 !Legal->isInductionVariable(Opd)) 6331 return nullptr; 6332 } 6333 6334 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6335 return PSE.getSCEV(Ptr); 6336 } 6337 6338 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6339 return Legal->hasStride(I->getOperand(0)) || 6340 Legal->hasStride(I->getOperand(1)); 6341 } 6342 6343 InstructionCost 6344 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6345 ElementCount VF) { 6346 assert(VF.isVector() && 6347 "Scalarization cost of instruction implies vectorization."); 6348 if (VF.isScalable()) 6349 return InstructionCost::getInvalid(); 6350 6351 Type *ValTy = getLoadStoreType(I); 6352 auto SE = PSE.getSE(); 6353 6354 unsigned AS = getLoadStoreAddressSpace(I); 6355 Value *Ptr = getLoadStorePointerOperand(I); 6356 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6357 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6358 // that it is being called from this specific place. 
6359 6360 // Figure out whether the access is strided and get the stride value 6361 // if it's known in compile time 6362 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6363 6364 // Get the cost of the scalar memory instruction and address computation. 6365 InstructionCost Cost = 6366 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6367 6368 // Don't pass *I here, since it is scalar but will actually be part of a 6369 // vectorized loop where the user of it is a vectorized instruction. 6370 const Align Alignment = getLoadStoreAlignment(I); 6371 Cost += VF.getKnownMinValue() * 6372 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6373 AS, TTI::TCK_RecipThroughput); 6374 6375 // Get the overhead of the extractelement and insertelement instructions 6376 // we might create due to scalarization. 6377 Cost += getScalarizationOverhead(I, VF); 6378 6379 // If we have a predicated load/store, it will need extra i1 extracts and 6380 // conditional branches, but may not be executed for each vector lane. Scale 6381 // the cost by the probability of executing the predicated block. 6382 if (isPredicatedInst(I, VF)) { 6383 Cost /= getReciprocalPredBlockProb(); 6384 6385 // Add the cost of an i1 extract and a branch 6386 auto *Vec_i1Ty = 6387 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6388 Cost += TTI.getScalarizationOverhead( 6389 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6390 /*Insert=*/false, /*Extract=*/true); 6391 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6392 6393 if (useEmulatedMaskMemRefHack(I, VF)) 6394 // Artificially setting to a high enough value to practically disable 6395 // vectorization with such operations. 6396 Cost = 3000000; 6397 } 6398 6399 return Cost; 6400 } 6401 6402 InstructionCost 6403 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6404 ElementCount VF) { 6405 Type *ValTy = getLoadStoreType(I); 6406 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6407 Value *Ptr = getLoadStorePointerOperand(I); 6408 unsigned AS = getLoadStoreAddressSpace(I); 6409 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6410 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6411 6412 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6413 "Stride should be 1 or -1 for consecutive memory access"); 6414 const Align Alignment = getLoadStoreAlignment(I); 6415 InstructionCost Cost = 0; 6416 if (Legal->isMaskRequired(I)) 6417 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6418 CostKind); 6419 else 6420 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6421 CostKind, I); 6422 6423 bool Reverse = ConsecutiveStride < 0; 6424 if (Reverse) 6425 Cost += 6426 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6427 return Cost; 6428 } 6429 6430 InstructionCost 6431 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6432 ElementCount VF) { 6433 assert(Legal->isUniformMemOp(*I)); 6434 6435 Type *ValTy = getLoadStoreType(I); 6436 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6437 const Align Alignment = getLoadStoreAlignment(I); 6438 unsigned AS = getLoadStoreAddressSpace(I); 6439 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6440 if (isa<LoadInst>(I)) { 6441 return TTI.getAddressComputationCost(ValTy) + 6442 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6443 CostKind) + 6444 
TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6445 } 6446 StoreInst *SI = cast<StoreInst>(I); 6447 6448 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6449 return TTI.getAddressComputationCost(ValTy) + 6450 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6451 CostKind) + 6452 (isLoopInvariantStoreValue 6453 ? 0 6454 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6455 VF.getKnownMinValue() - 1)); 6456 } 6457 6458 InstructionCost 6459 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6460 ElementCount VF) { 6461 Type *ValTy = getLoadStoreType(I); 6462 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6463 const Align Alignment = getLoadStoreAlignment(I); 6464 const Value *Ptr = getLoadStorePointerOperand(I); 6465 6466 return TTI.getAddressComputationCost(VectorTy) + 6467 TTI.getGatherScatterOpCost( 6468 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6469 TargetTransformInfo::TCK_RecipThroughput, I); 6470 } 6471 6472 InstructionCost 6473 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6474 ElementCount VF) { 6475 // TODO: Once we have support for interleaving with scalable vectors 6476 // we can calculate the cost properly here. 6477 if (VF.isScalable()) 6478 return InstructionCost::getInvalid(); 6479 6480 Type *ValTy = getLoadStoreType(I); 6481 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6482 unsigned AS = getLoadStoreAddressSpace(I); 6483 6484 auto Group = getInterleavedAccessGroup(I); 6485 assert(Group && "Fail to get an interleaved access group."); 6486 6487 unsigned InterleaveFactor = Group->getFactor(); 6488 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6489 6490 // Holds the indices of existing members in the interleaved group. 6491 SmallVector<unsigned, 4> Indices; 6492 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6493 if (Group->getMember(IF)) 6494 Indices.push_back(IF); 6495 6496 // Calculate the cost of the whole interleaved group. 6497 bool UseMaskForGaps = 6498 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6499 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6500 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6501 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6502 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6503 6504 if (Group->isReverse()) { 6505 // TODO: Add support for reversed masked interleaved access. 6506 assert(!Legal->isMaskRequired(I) && 6507 "Reverse masked interleaved access not supported."); 6508 Cost += 6509 Group->getNumMembers() * 6510 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6511 } 6512 return Cost; 6513 } 6514 6515 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6516 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6517 using namespace llvm::PatternMatch; 6518 // Early exit for no inloop reductions 6519 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6520 return None; 6521 auto *VectorTy = cast<VectorType>(Ty); 6522 6523 // We are looking for a pattern of, and finding the minimal acceptable cost: 6524 // reduce(mul(ext(A), ext(B))) or 6525 // reduce(mul(A, B)) or 6526 // reduce(ext(A)) or 6527 // reduce(A). 6528 // The basic idea is that we walk down the tree to do that, finding the root 6529 // reduction instruction in InLoopReductionImmediateChains. 
From there we find 6530 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6531 // of the components. If the reduction cost is lower, then we return it for the 6532 // reduction instruction and 0 for the other instructions in the pattern. If 6533 // it is not, we return an invalid cost specifying the original cost method 6534 // should be used. 6535 Instruction *RetI = I; 6536 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6537 if (!RetI->hasOneUser()) 6538 return None; 6539 RetI = RetI->user_back(); 6540 } 6541 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6542 RetI->user_back()->getOpcode() == Instruction::Add) { 6543 if (!RetI->hasOneUser()) 6544 return None; 6545 RetI = RetI->user_back(); 6546 } 6547 6548 // Test if the found instruction is a reduction, and if not return an invalid 6549 // cost specifying the parent to use the original cost modelling. 6550 if (!InLoopReductionImmediateChains.count(RetI)) 6551 return None; 6552 6553 // Find the reduction this chain is a part of and calculate the basic cost of 6554 // the reduction on its own. 6555 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6556 Instruction *ReductionPhi = LastChain; 6557 while (!isa<PHINode>(ReductionPhi)) 6558 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6559 6560 const RecurrenceDescriptor &RdxDesc = 6561 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second; 6562 6563 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 6564 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 6565 6566 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 6567 // normal fmul instruction to the cost of the fadd reduction. 6568 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 6569 BaseCost += 6570 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 6571 6572 // If we're using ordered reductions then we can just return the base cost 6573 // here, since getArithmeticReductionCost calculates the full ordered 6574 // reduction cost when FP reassociation is not allowed. 6575 if (useOrderedReductions(RdxDesc)) 6576 return BaseCost; 6577 6578 // Get the operand that was not the reduction chain and match it to one of the 6579 // patterns, returning the better cost if it is found. 6580 Instruction *RedOp = RetI->getOperand(1) == LastChain 6581 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6582 : dyn_cast<Instruction>(RetI->getOperand(1)); 6583 6584 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6585 6586 Instruction *Op0, *Op1; 6587 if (RedOp && 6588 match(RedOp, 6589 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 6590 match(Op0, m_ZExtOrSExt(m_Value())) && 6591 Op0->getOpcode() == Op1->getOpcode() && 6592 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6593 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 6594 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 6595 6596 // Matched reduce(ext(mul(ext(A), ext(B)))) 6597 // Note that the extend opcodes need to all match, or if A==B they will have 6598 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 6599 // which is equally fine.
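// A minimal IR sketch of the shape matched here (names and types are purely
// illustrative):
//   %a.ext = sext <8 x i8> %a to <8 x i16>
//   %b.ext = sext <8 x i8> %b to <8 x i16>
//   %mul   = mul <8 x i16> %a.ext, %b.ext
//   %m.ext = sext <8 x i16> %mul to <8 x i32>
//   %r     = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m.ext)
//   %sum   = add i32 %phi, %r
// If TTI reports an extended-add-reduction (e.g. a dot-product style
// operation) cheaper than the ext + mul + ext + reduce sequence, its cost is
// returned for the reduction instruction and 0 for the others in the pattern.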
6600 bool IsUnsigned = isa<ZExtInst>(Op0); 6601 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6602 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6603 6604 InstructionCost ExtCost = 6605 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6606 TTI::CastContextHint::None, CostKind, Op0); 6607 InstructionCost MulCost = 6608 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6609 InstructionCost Ext2Cost = 6610 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6611 TTI::CastContextHint::None, CostKind, RedOp); 6612 6613 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6614 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6615 CostKind); 6616 6617 if (RedCost.isValid() && 6618 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6619 return I == RetI ? RedCost : 0; 6620 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6621 !TheLoop->isLoopInvariant(RedOp)) { 6622 // Matched reduce(ext(A)) 6623 bool IsUnsigned = isa<ZExtInst>(RedOp); 6624 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6625 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6626 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6627 CostKind); 6628 6629 InstructionCost ExtCost = 6630 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6631 TTI::CastContextHint::None, CostKind, RedOp); 6632 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6633 return I == RetI ? RedCost : 0; 6634 } else if (RedOp && 6635 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6636 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6637 Op0->getOpcode() == Op1->getOpcode() && 6638 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6639 bool IsUnsigned = isa<ZExtInst>(Op0); 6640 Type *Op0Ty = Op0->getOperand(0)->getType(); 6641 Type *Op1Ty = Op1->getOperand(0)->getType(); 6642 Type *LargestOpTy = 6643 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6644 : Op0Ty; 6645 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6646 6647 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6648 // different sizes. We take the largest type as the ext to reduce, and add 6649 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6650 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6651 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6652 TTI::CastContextHint::None, CostKind, Op0); 6653 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6654 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6655 TTI::CastContextHint::None, CostKind, Op1); 6656 InstructionCost MulCost = 6657 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6658 6659 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6660 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6661 CostKind); 6662 InstructionCost ExtraExtCost = 0; 6663 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6664 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6665 ExtraExtCost = TTI.getCastInstrCost( 6666 ExtraExtOp->getOpcode(), ExtType, 6667 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6668 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6669 } 6670 6671 if (RedCost.isValid() && 6672 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6673 return I == RetI ? 
RedCost : 0; 6674 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6675 // Matched reduce(mul()) 6676 InstructionCost MulCost = 6677 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6678 6679 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6680 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6681 CostKind); 6682 6683 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6684 return I == RetI ? RedCost : 0; 6685 } 6686 } 6687 6688 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6689 } 6690 6691 InstructionCost 6692 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6693 ElementCount VF) { 6694 // Calculate scalar cost only. Vectorization cost should be ready at this 6695 // moment. 6696 if (VF.isScalar()) { 6697 Type *ValTy = getLoadStoreType(I); 6698 const Align Alignment = getLoadStoreAlignment(I); 6699 unsigned AS = getLoadStoreAddressSpace(I); 6700 6701 return TTI.getAddressComputationCost(ValTy) + 6702 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6703 TTI::TCK_RecipThroughput, I); 6704 } 6705 return getWideningCost(I, VF); 6706 } 6707 6708 LoopVectorizationCostModel::VectorizationCostTy 6709 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6710 ElementCount VF) { 6711 // If we know that this instruction will remain uniform, check the cost of 6712 // the scalar version. 6713 if (isUniformAfterVectorization(I, VF)) 6714 VF = ElementCount::getFixed(1); 6715 6716 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6717 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6718 6719 // Forced scalars do not have any scalarization overhead. 6720 auto ForcedScalar = ForcedScalars.find(VF); 6721 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6722 auto InstSet = ForcedScalar->second; 6723 if (InstSet.count(I)) 6724 return VectorizationCostTy( 6725 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6726 VF.getKnownMinValue()), 6727 false); 6728 } 6729 6730 Type *VectorTy; 6731 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6732 6733 bool TypeNotScalarized = false; 6734 if (VF.isVector() && VectorTy->isVectorTy()) { 6735 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 6736 if (NumParts) 6737 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6738 else 6739 C = InstructionCost::getInvalid(); 6740 } 6741 return VectorizationCostTy(C, TypeNotScalarized); 6742 } 6743 6744 InstructionCost 6745 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6746 ElementCount VF) const { 6747 6748 // There is no mechanism yet to create a scalable scalarization loop, 6749 // so this is currently Invalid. 6750 if (VF.isScalable()) 6751 return InstructionCost::getInvalid(); 6752 6753 if (VF.isScalar()) 6754 return 0; 6755 6756 InstructionCost Cost = 0; 6757 Type *RetTy = ToVectorTy(I->getType(), VF); 6758 if (!RetTy->isVoidTy() && 6759 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6760 Cost += TTI.getScalarizationOverhead( 6761 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6762 false); 6763 6764 // Some targets keep addresses scalar. 6765 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6766 return Cost; 6767 6768 // Some targets support efficient element stores. 6769 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6770 return Cost; 6771 6772 // Collect operands to consider. 6773 CallInst *CI = dyn_cast<CallInst>(I); 6774 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 6775 6776 // Skip operands that do not require extraction/scalarization and do not incur 6777 // any overhead. 6778 SmallVector<Type *> Tys; 6779 for (auto *V : filterExtractingOperands(Ops, VF)) 6780 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6781 return Cost + TTI.getOperandsScalarizationOverhead( 6782 filterExtractingOperands(Ops, VF), Tys); 6783 } 6784 6785 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6786 if (VF.isScalar()) 6787 return; 6788 NumPredStores = 0; 6789 for (BasicBlock *BB : TheLoop->blocks()) { 6790 // For each instruction in the old loop. 6791 for (Instruction &I : *BB) { 6792 Value *Ptr = getLoadStorePointerOperand(&I); 6793 if (!Ptr) 6794 continue; 6795 6796 // TODO: We should generate better code and update the cost model for 6797 // predicated uniform stores. Today they are treated as any other 6798 // predicated store (see added test cases in 6799 // invariant-store-vectorization.ll). 6800 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6801 NumPredStores++; 6802 6803 if (Legal->isUniformMemOp(I)) { 6804 // TODO: Avoid replicating loads and stores instead of 6805 // relying on instcombine to remove them. 6806 // Load: Scalar load + broadcast 6807 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6808 InstructionCost Cost; 6809 if (isa<StoreInst>(&I) && VF.isScalable() && 6810 isLegalGatherOrScatter(&I, VF)) { 6811 Cost = getGatherScatterCost(&I, VF); 6812 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6813 } else { 6814 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 6815 "Cannot yet scalarize uniform stores"); 6816 Cost = getUniformMemOpCost(&I, VF); 6817 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6818 } 6819 continue; 6820 } 6821 6822 // We assume that widening is the best solution when possible. 6823 if (memoryInstructionCanBeWidened(&I, VF)) { 6824 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6825 int ConsecutiveStride = Legal->isConsecutivePtr( 6826 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6827 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6828 "Expected consecutive stride."); 6829 InstWidening Decision = 6830 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6831 setWideningDecision(&I, VF, Decision, Cost); 6832 continue; 6833 } 6834 6835 // Choose between Interleaving, Gather/Scatter or Scalarization. 6836 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6837 unsigned NumAccesses = 1; 6838 if (isAccessInterleaved(&I)) { 6839 auto Group = getInterleavedAccessGroup(&I); 6840 assert(Group && "Fail to get an interleaved access group."); 6841 6842 // Make one decision for the whole group. 6843 if (getWideningDecision(&I, VF) != CM_Unknown) 6844 continue; 6845 6846 NumAccesses = Group->getNumMembers(); 6847 if (interleavedAccessCanBeWidened(&I, VF)) 6848 InterleaveCost = getInterleaveGroupCost(&I, VF); 6849 } 6850 6851 InstructionCost GatherScatterCost = 6852 isLegalGatherOrScatter(&I, VF) 6853 ? getGatherScatterCost(&I, VF) * NumAccesses 6854 : InstructionCost::getInvalid(); 6855 6856 InstructionCost ScalarizationCost = 6857 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6858 6859 // Choose better solution for the current VF, 6860 // write down this decision and use it during vectorization. 
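// (Note on tie-breaking in the comparison below: interleaving is preferred
// over an equally expensive gather/scatter, while scalarization is preferred
// over an equally expensive gather/scatter and over an equally expensive
// interleave group.)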
6861 InstructionCost Cost;
6862 InstWidening Decision;
6863 if (InterleaveCost <= GatherScatterCost &&
6864 InterleaveCost < ScalarizationCost) {
6865 Decision = CM_Interleave;
6866 Cost = InterleaveCost;
6867 } else if (GatherScatterCost < ScalarizationCost) {
6868 Decision = CM_GatherScatter;
6869 Cost = GatherScatterCost;
6870 } else {
6871 Decision = CM_Scalarize;
6872 Cost = ScalarizationCost;
6873 }
6874 // If the instruction belongs to an interleave group, the whole group
6875 // receives the same decision. The whole group receives the cost, but
6876 // the cost will actually be assigned to one instruction.
6877 if (auto Group = getInterleavedAccessGroup(&I))
6878 setWideningDecision(Group, VF, Decision, Cost);
6879 else
6880 setWideningDecision(&I, VF, Decision, Cost);
6881 }
6882 }
6883
6884 // Make sure that any load of address and any other address computation
6885 // remains scalar unless there is gather/scatter support. This avoids
6886 // inevitable extracts into address registers, and also has the benefit of
6887 // activating LSR more, since that pass can't optimize vectorized
6888 // addresses.
6889 if (TTI.prefersVectorizedAddressing())
6890 return;
6891
6892 // Start with all scalar pointer uses.
6893 SmallPtrSet<Instruction *, 8> AddrDefs;
6894 for (BasicBlock *BB : TheLoop->blocks())
6895 for (Instruction &I : *BB) {
6896 Instruction *PtrDef =
6897 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6898 if (PtrDef && TheLoop->contains(PtrDef) &&
6899 getWideningDecision(&I, VF) != CM_GatherScatter)
6900 AddrDefs.insert(PtrDef);
6901 }
6902
6903 // Add all instructions used to generate the addresses.
6904 SmallVector<Instruction *, 4> Worklist;
6905 append_range(Worklist, AddrDefs);
6906 while (!Worklist.empty()) {
6907 Instruction *I = Worklist.pop_back_val();
6908 for (auto &Op : I->operands())
6909 if (auto *InstOp = dyn_cast<Instruction>(Op))
6910 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6911 AddrDefs.insert(InstOp).second)
6912 Worklist.push_back(InstOp);
6913 }
6914
6915 for (auto *I : AddrDefs) {
6916 if (isa<LoadInst>(I)) {
6917 // Setting the desired widening decision should ideally be handled
6918 // by cost functions, but since this involves the task of finding out
6919 // if the loaded register is involved in an address computation, it is
6920 // instead changed here when we know this is the case.
6921 InstWidening Decision = getWideningDecision(I, VF);
6922 if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6923 // Scalarize a widened load of address.
6924 setWideningDecision(
6925 I, VF, CM_Scalarize,
6926 (VF.getKnownMinValue() *
6927 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6928 else if (auto Group = getInterleavedAccessGroup(I)) {
6929 // Scalarize an interleave group of address loads.
6930 for (unsigned I = 0; I < Group->getFactor(); ++I) {
6931 if (Instruction *Member = Group->getMember(I))
6932 setWideningDecision(
6933 Member, VF, CM_Scalarize,
6934 (VF.getKnownMinValue() *
6935 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6936 }
6937 }
6938 } else
6939 // Make sure I gets scalarized and receives a cost estimate without
6940 // scalarization overhead.
6941 ForcedScalars[VF].insert(I);
6942 }
6943 }
6944
6945 InstructionCost
6946 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6947 Type *&VectorTy) {
6948 Type *RetTy = I->getType();
6949 if (canTruncateToMinimalBitwidth(I, VF))
6950 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6951 auto SE = PSE.getSE();
6952 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6953
6954 auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6955 ElementCount VF) -> bool {
6956 if (VF.isScalar())
6957 return true;
6958
6959 auto Scalarized = InstsToScalarize.find(VF);
6960 assert(Scalarized != InstsToScalarize.end() &&
6961 "VF not yet analyzed for scalarization profitability");
6962 return !Scalarized->second.count(I) &&
6963 llvm::all_of(I->users(), [&](User *U) {
6964 auto *UI = cast<Instruction>(U);
6965 return !Scalarized->second.count(UI);
6966 });
6967 };
6968 (void) hasSingleCopyAfterVectorization;
6969
6970 if (isScalarAfterVectorization(I, VF)) {
6971 // With the exception of GEPs and PHIs, after scalarization there should
6972 // only be one copy of the instruction generated in the loop. This is
6973 // because the VF is either 1, or any instructions that need scalarizing
6974 // have already been dealt with by the time we get here. As a result,
6975 // it means we don't have to multiply the instruction cost by VF.
6976 assert(I->getOpcode() == Instruction::GetElementPtr ||
6977 I->getOpcode() == Instruction::PHI ||
6978 (I->getOpcode() == Instruction::BitCast &&
6979 I->getType()->isPointerTy()) ||
6980 hasSingleCopyAfterVectorization(I, VF));
6981 VectorTy = RetTy;
6982 } else
6983 VectorTy = ToVectorTy(RetTy, VF);
6984
6985 // TODO: We need to estimate the cost of intrinsic calls.
6986 switch (I->getOpcode()) {
6987 case Instruction::GetElementPtr:
6988 // We mark this instruction as zero-cost because the cost of GEPs in
6989 // vectorized code depends on whether the corresponding memory instruction
6990 // is scalarized or not. Therefore, we handle GEPs with the memory
6991 // instruction cost.
6992 return 0;
6993 case Instruction::Br: {
6994 // In cases of scalarized and predicated instructions, there will be VF
6995 // predicated blocks in the vectorized loop. Each branch around these
6996 // blocks also requires an extract of its vector compare i1 element.
6997 bool ScalarPredicatedBB = false;
6998 BranchInst *BI = cast<BranchInst>(I);
6999 if (VF.isVector() && BI->isConditional() &&
7000 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7001 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7002 ScalarPredicatedBB = true;
7003
7004 if (ScalarPredicatedBB) {
7005 // Not possible to scalarize a scalable vector with predicated instructions.
7006 if (VF.isScalable())
7007 return InstructionCost::getInvalid();
7008 // Return cost for branches around scalarized and predicated blocks.
7009 auto *Vec_i1Ty =
7010 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7011 return (
7012 TTI.getScalarizationOverhead(
7013 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7014 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7015 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7016 // The back-edge branch will remain, as will all scalar branches.
7017 return TTI.getCFInstrCost(Instruction::Br, CostKind);
7018 else
7019 // This branch will be eliminated by if-conversion.
7020 return 0; 7021 // Note: We currently assume zero cost for an unconditional branch inside 7022 // a predicated block since it will become a fall-through, although we 7023 // may decide in the future to call TTI for all branches. 7024 } 7025 case Instruction::PHI: { 7026 auto *Phi = cast<PHINode>(I); 7027 7028 // First-order recurrences are replaced by vector shuffles inside the loop. 7029 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7030 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7031 return TTI.getShuffleCost( 7032 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7033 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7034 7035 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7036 // converted into select instructions. We require N - 1 selects per phi 7037 // node, where N is the number of incoming values. 7038 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7039 return (Phi->getNumIncomingValues() - 1) * 7040 TTI.getCmpSelInstrCost( 7041 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7042 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7043 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7044 7045 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7046 } 7047 case Instruction::UDiv: 7048 case Instruction::SDiv: 7049 case Instruction::URem: 7050 case Instruction::SRem: 7051 // If we have a predicated instruction, it may not be executed for each 7052 // vector lane. Get the scalarization cost and scale this amount by the 7053 // probability of executing the predicated block. If the instruction is not 7054 // predicated, we fall through to the next case. 7055 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7056 InstructionCost Cost = 0; 7057 7058 // These instructions have a non-void type, so account for the phi nodes 7059 // that we will create. This cost is likely to be zero. The phi node 7060 // cost, if any, should be scaled by the block probability because it 7061 // models a copy at the end of each predicated block. 7062 Cost += VF.getKnownMinValue() * 7063 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7064 7065 // The cost of the non-predicated instruction. 7066 Cost += VF.getKnownMinValue() * 7067 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7068 7069 // The cost of insertelement and extractelement instructions needed for 7070 // scalarization. 7071 Cost += getScalarizationOverhead(I, VF); 7072 7073 // Scale the cost by the probability of executing the predicated blocks. 7074 // This assumes the predicated block for each vector lane is equally 7075 // likely. 7076 return Cost / getReciprocalPredBlockProb(); 7077 } 7078 LLVM_FALLTHROUGH; 7079 case Instruction::Add: 7080 case Instruction::FAdd: 7081 case Instruction::Sub: 7082 case Instruction::FSub: 7083 case Instruction::Mul: 7084 case Instruction::FMul: 7085 case Instruction::FDiv: 7086 case Instruction::FRem: 7087 case Instruction::Shl: 7088 case Instruction::LShr: 7089 case Instruction::AShr: 7090 case Instruction::And: 7091 case Instruction::Or: 7092 case Instruction::Xor: { 7093 // Since we will replace the stride by 1 the multiplication should go away. 
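// Illustrative example (names assumed for illustration): if the loop is
// versioned on a symbolic stride being 1, an index computation such as
//   %idx = mul i64 %i, %Stride
// folds to %i in the versioned loop, so the multiply is expected to
// disappear and is given zero cost here.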
7094 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7095 return 0; 7096 7097 // Detect reduction patterns 7098 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7099 return *RedCost; 7100 7101 // Certain instructions can be cheaper to vectorize if they have a constant 7102 // second vector operand. One example of this are shifts on x86. 7103 Value *Op2 = I->getOperand(1); 7104 TargetTransformInfo::OperandValueProperties Op2VP; 7105 TargetTransformInfo::OperandValueKind Op2VK = 7106 TTI.getOperandInfo(Op2, Op2VP); 7107 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7108 Op2VK = TargetTransformInfo::OK_UniformValue; 7109 7110 SmallVector<const Value *, 4> Operands(I->operand_values()); 7111 return TTI.getArithmeticInstrCost( 7112 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7113 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7114 } 7115 case Instruction::FNeg: { 7116 return TTI.getArithmeticInstrCost( 7117 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7118 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7119 TargetTransformInfo::OP_None, I->getOperand(0), I); 7120 } 7121 case Instruction::Select: { 7122 SelectInst *SI = cast<SelectInst>(I); 7123 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7124 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7125 7126 const Value *Op0, *Op1; 7127 using namespace llvm::PatternMatch; 7128 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7129 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7130 // select x, y, false --> x & y 7131 // select x, true, y --> x | y 7132 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7133 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7134 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7135 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7136 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7137 Op1->getType()->getScalarSizeInBits() == 1); 7138 7139 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7140 return TTI.getArithmeticInstrCost( 7141 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7142 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7143 } 7144 7145 Type *CondTy = SI->getCondition()->getType(); 7146 if (!ScalarCond) 7147 CondTy = VectorType::get(CondTy, VF); 7148 7149 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7150 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7151 Pred = Cmp->getPredicate(); 7152 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7153 CostKind, I); 7154 } 7155 case Instruction::ICmp: 7156 case Instruction::FCmp: { 7157 Type *ValTy = I->getOperand(0)->getType(); 7158 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7159 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7160 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7161 VectorTy = ToVectorTy(ValTy, VF); 7162 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7163 cast<CmpInst>(I)->getPredicate(), CostKind, 7164 I); 7165 } 7166 case Instruction::Store: 7167 case Instruction::Load: { 7168 ElementCount Width = VF; 7169 if (Width.isVector()) { 7170 InstWidening Decision = getWideningDecision(I, Width); 7171 assert(Decision != CM_Unknown && 7172 "CM decision should be taken at this point"); 7173 if (Decision == CM_Scalarize) 7174 Width = ElementCount::getFixed(1); 7175 } 7176 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7177 return getMemoryInstructionCost(I, VF); 7178 } 7179 case Instruction::BitCast: 7180 if (I->getType()->isPointerTy()) 7181 return 0; 7182 LLVM_FALLTHROUGH; 7183 case Instruction::ZExt: 7184 case Instruction::SExt: 7185 case Instruction::FPToUI: 7186 case Instruction::FPToSI: 7187 case Instruction::FPExt: 7188 case Instruction::PtrToInt: 7189 case Instruction::IntToPtr: 7190 case Instruction::SIToFP: 7191 case Instruction::UIToFP: 7192 case Instruction::Trunc: 7193 case Instruction::FPTrunc: { 7194 // Computes the CastContextHint from a Load/Store instruction. 7195 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7196 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7197 "Expected a load or a store!"); 7198 7199 if (VF.isScalar() || !TheLoop->contains(I)) 7200 return TTI::CastContextHint::Normal; 7201 7202 switch (getWideningDecision(I, VF)) { 7203 case LoopVectorizationCostModel::CM_GatherScatter: 7204 return TTI::CastContextHint::GatherScatter; 7205 case LoopVectorizationCostModel::CM_Interleave: 7206 return TTI::CastContextHint::Interleave; 7207 case LoopVectorizationCostModel::CM_Scalarize: 7208 case LoopVectorizationCostModel::CM_Widen: 7209 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7210 : TTI::CastContextHint::Normal; 7211 case LoopVectorizationCostModel::CM_Widen_Reverse: 7212 return TTI::CastContextHint::Reversed; 7213 case LoopVectorizationCostModel::CM_Unknown: 7214 llvm_unreachable("Instr did not go through cost modelling?"); 7215 } 7216 7217 llvm_unreachable("Unhandled case!"); 7218 }; 7219 7220 unsigned Opcode = I->getOpcode(); 7221 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7222 // For Trunc, the context is the only user, which must be a StoreInst. 7223 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7224 if (I->hasOneUse()) 7225 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7226 CCH = ComputeCCH(Store); 7227 } 7228 // For Z/Sext, the context is the operand, which must be a LoadInst. 
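// Illustrative IR for the extension case handled next:
//   %v = load i8, i8* %p          ; the load supplies the cast context
//   %e = zext i8 %v to i32
// For the truncation case handled above, a 'trunc' whose single user is a
// store takes its context from that store instead.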
7229 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7230 Opcode == Instruction::FPExt) { 7231 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7232 CCH = ComputeCCH(Load); 7233 } 7234 7235 // We optimize the truncation of induction variables having constant 7236 // integer steps. The cost of these truncations is the same as the scalar 7237 // operation. 7238 if (isOptimizableIVTruncate(I, VF)) { 7239 auto *Trunc = cast<TruncInst>(I); 7240 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7241 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7242 } 7243 7244 // Detect reduction patterns 7245 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7246 return *RedCost; 7247 7248 Type *SrcScalarTy = I->getOperand(0)->getType(); 7249 Type *SrcVecTy = 7250 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7251 if (canTruncateToMinimalBitwidth(I, VF)) { 7252 // This cast is going to be shrunk. This may remove the cast or it might 7253 // turn it into slightly different cast. For example, if MinBW == 16, 7254 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7255 // 7256 // Calculate the modified src and dest types. 7257 Type *MinVecTy = VectorTy; 7258 if (Opcode == Instruction::Trunc) { 7259 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7260 VectorTy = 7261 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7262 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7263 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7264 VectorTy = 7265 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7266 } 7267 } 7268 7269 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7270 } 7271 case Instruction::Call: { 7272 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7273 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7274 return *RedCost; 7275 bool NeedToScalarize; 7276 CallInst *CI = cast<CallInst>(I); 7277 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7278 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7279 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7280 return std::min(CallCost, IntrinsicCost); 7281 } 7282 return CallCost; 7283 } 7284 case Instruction::ExtractValue: 7285 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7286 case Instruction::Alloca: 7287 // We cannot easily widen alloca to a scalable alloca, as 7288 // the result would need to be a vector of pointers. 7289 if (VF.isScalable()) 7290 return InstructionCost::getInvalid(); 7291 LLVM_FALLTHROUGH; 7292 default: 7293 // This opcode is unknown. Assume that it is the same as 'mul'. 7294 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7295 } // end of switch. 
7296 } 7297 7298 char LoopVectorize::ID = 0; 7299 7300 static const char lv_name[] = "Loop Vectorization"; 7301 7302 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7303 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7304 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7305 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7306 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7307 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7308 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7309 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7310 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7311 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7312 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7313 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7314 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7315 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7316 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7317 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7318 7319 namespace llvm { 7320 7321 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7322 7323 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7324 bool VectorizeOnlyWhenForced) { 7325 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7326 } 7327 7328 } // end namespace llvm 7329 7330 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7331 // Check if the pointer operand of a load or store instruction is 7332 // consecutive. 7333 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7334 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7335 return false; 7336 } 7337 7338 void LoopVectorizationCostModel::collectValuesToIgnore() { 7339 // Ignore ephemeral values. 7340 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7341 7342 // Ignore type-promoting instructions we identified during reduction 7343 // detection. 7344 for (auto &Reduction : Legal->getReductionVars()) { 7345 const RecurrenceDescriptor &RedDes = Reduction.second; 7346 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7347 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7348 } 7349 // Ignore type-casting instructions we identified during induction 7350 // detection. 7351 for (auto &Induction : Legal->getInductionVars()) { 7352 const InductionDescriptor &IndDes = Induction.second; 7353 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7354 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7355 } 7356 } 7357 7358 void LoopVectorizationCostModel::collectInLoopReductions() { 7359 for (auto &Reduction : Legal->getReductionVars()) { 7360 PHINode *Phi = Reduction.first; 7361 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7362 7363 // We don't collect reductions that are type promoted (yet). 7364 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7365 continue; 7366 7367 // If the target would prefer this reduction to happen "in-loop", then we 7368 // want to record it as such. 7369 unsigned Opcode = RdxDesc.getOpcode(); 7370 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7371 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7372 TargetTransformInfo::ReductionFlags())) 7373 continue; 7374 7375 // Check that we can correctly put the reductions into the loop, by 7376 // finding the chain of operations that leads from the phi to the loop 7377 // exit value. 
7378 SmallVector<Instruction *, 4> ReductionOperations =
7379 RdxDesc.getReductionOpChain(Phi, TheLoop);
7380 bool InLoop = !ReductionOperations.empty();
7381 if (InLoop) {
7382 InLoopReductionChains[Phi] = ReductionOperations;
7383 // Add the elements to InLoopReductionImmediateChains for cost modelling.
7384 Instruction *LastChain = Phi;
7385 for (auto *I : ReductionOperations) {
7386 InLoopReductionImmediateChains[I] = LastChain;
7387 LastChain = I;
7388 }
7389 }
7390 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7391 << " reduction for phi: " << *Phi << "\n");
7392 }
7393 }
7394
7395 // TODO: we could return a pair of values that specify the max VF and
7396 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7397 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
7398 // doesn't have a cost model that can choose which plan to execute if
7399 // more than one is generated.
7400 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7401 LoopVectorizationCostModel &CM) {
7402 unsigned WidestType;
7403 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7404 return WidestVectorRegBits / WidestType;
7405 }
7406
7407 VectorizationFactor
7408 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7409 assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7410 ElementCount VF = UserVF;
7411 // Outer loop handling: outer loops may require CFG and instruction level
7412 // transformations before even evaluating whether vectorization is profitable.
7413 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7414 // the vectorization pipeline.
7415 if (!OrigLoop->isInnermost()) {
7416 // If the user doesn't provide a vectorization factor, determine a
7417 // reasonable one.
7418 if (UserVF.isZero()) {
7419 VF = ElementCount::getFixed(determineVPlanVF(
7420 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7421 .getFixedSize(),
7422 CM));
7423 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7424
7425 // Make sure we have a VF > 1 for stress testing.
7426 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7427 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7428 << "overriding computed VF.\n");
7429 VF = ElementCount::getFixed(4);
7430 }
7431 }
7432 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7433 assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7434 "VF needs to be a power of two");
7435 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7436 << "VF " << VF << " to build VPlans.\n");
7437 buildVPlans(VF, VF);
7438
7439 // For VPlan build stress testing, we bail out after VPlan construction.
7440 if (VPlanBuildStressTest)
7441 return VectorizationFactor::Disabled();
7442
7443 return {VF, 0 /*Cost*/};
7444 }
7445
7446 LLVM_DEBUG(
7447 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7448 "VPlan-native path.\n");
7449 return VectorizationFactor::Disabled();
7450 }
7451
7452 Optional<VectorizationFactor>
7453 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7454 assert(OrigLoop->isInnermost() && "Inner loop expected.");
7455 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7456 if (!MaxFactors) // Cases that should not be vectorized or interleaved.
7457 return None;
7458
7459 // Invalidate interleave groups if all blocks of the loop will be predicated.
7460 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7461 !useMaskedInterleavedAccesses(*TTI)) { 7462 LLVM_DEBUG( 7463 dbgs() 7464 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7465 "which requires masked-interleaved support.\n"); 7466 if (CM.InterleaveInfo.invalidateGroups()) 7467 // Invalidating interleave groups also requires invalidating all decisions 7468 // based on them, which includes widening decisions and uniform and scalar 7469 // values. 7470 CM.invalidateCostModelingDecisions(); 7471 } 7472 7473 ElementCount MaxUserVF = 7474 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7475 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7476 if (!UserVF.isZero() && UserVFIsLegal) { 7477 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7478 "VF needs to be a power of two"); 7479 // Collect the instructions (and their associated costs) that will be more 7480 // profitable to scalarize. 7481 if (CM.selectUserVectorizationFactor(UserVF)) { 7482 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7483 CM.collectInLoopReductions(); 7484 buildVPlansWithVPRecipes(UserVF, UserVF); 7485 LLVM_DEBUG(printPlans(dbgs())); 7486 return {{UserVF, 0}}; 7487 } else 7488 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7489 "InvalidCost", ORE, OrigLoop); 7490 } 7491 7492 // Populate the set of Vectorization Factor Candidates. 7493 ElementCountSet VFCandidates; 7494 for (auto VF = ElementCount::getFixed(1); 7495 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7496 VFCandidates.insert(VF); 7497 for (auto VF = ElementCount::getScalable(1); 7498 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7499 VFCandidates.insert(VF); 7500 7501 for (const auto &VF : VFCandidates) { 7502 // Collect Uniform and Scalar instructions after vectorization with VF. 7503 CM.collectUniformsAndScalars(VF); 7504 7505 // Collect the instructions (and their associated costs) that will be more 7506 // profitable to scalarize. 7507 if (VF.isVector()) 7508 CM.collectInstsToScalarize(VF); 7509 } 7510 7511 CM.collectInLoopReductions(); 7512 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7513 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7514 7515 LLVM_DEBUG(printPlans(dbgs())); 7516 if (!MaxFactors.hasVector()) 7517 return VectorizationFactor::Disabled(); 7518 7519 // Select the optimal vectorization factor. 7520 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7521 7522 // Check if it is profitable to vectorize with runtime checks. 
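// In short (see the code below): if the number of required runtime pointer
// checks exceeds the pragma threshold, or exceeds the default threshold
// while the user has not allowed reordering of memory operations, the checks
// are considered too expensive and vectorization is abandoned.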
7523 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7524 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7525 bool PragmaThresholdReached = 7526 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7527 bool ThresholdReached = 7528 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7529 if ((ThresholdReached && !Hints.allowReordering()) || 7530 PragmaThresholdReached) { 7531 ORE->emit([&]() { 7532 return OptimizationRemarkAnalysisAliasing( 7533 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7534 OrigLoop->getHeader()) 7535 << "loop not vectorized: cannot prove it is safe to reorder " 7536 "memory operations"; 7537 }); 7538 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7539 Hints.emitRemarkWithHints(); 7540 return VectorizationFactor::Disabled(); 7541 } 7542 } 7543 return SelectedVF; 7544 } 7545 7546 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7547 assert(count_if(VPlans, 7548 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7549 1 && 7550 "Best VF has not a single VPlan."); 7551 7552 for (const VPlanPtr &Plan : VPlans) { 7553 if (Plan->hasVF(VF)) 7554 return *Plan.get(); 7555 } 7556 llvm_unreachable("No plan found!"); 7557 } 7558 7559 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7560 SmallVector<Metadata *, 4> MDs; 7561 // Reserve first location for self reference to the LoopID metadata node. 7562 MDs.push_back(nullptr); 7563 bool IsUnrollMetadata = false; 7564 MDNode *LoopID = L->getLoopID(); 7565 if (LoopID) { 7566 // First find existing loop unrolling disable metadata. 7567 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7568 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7569 if (MD) { 7570 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7571 IsUnrollMetadata = 7572 S && S->getString().startswith("llvm.loop.unroll.disable"); 7573 } 7574 MDs.push_back(LoopID->getOperand(i)); 7575 } 7576 } 7577 7578 if (!IsUnrollMetadata) { 7579 // Add runtime unroll disable metadata. 7580 LLVMContext &Context = L->getHeader()->getContext(); 7581 SmallVector<Metadata *, 1> DisableOperands; 7582 DisableOperands.push_back( 7583 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7584 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7585 MDs.push_back(DisableNode); 7586 MDNode *NewLoopID = MDNode::get(Context, MDs); 7587 // Set operand 0 to refer to the loop id itself. 7588 NewLoopID->replaceOperandWith(0, NewLoopID); 7589 L->setLoopID(NewLoopID); 7590 } 7591 } 7592 7593 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7594 VPlan &BestVPlan, 7595 InnerLoopVectorizer &ILV, 7596 DominatorTree *DT) { 7597 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7598 << '\n'); 7599 7600 // Perform the actual loop transformation. 7601 7602 // 1. Set up the skeleton for vectorization, including vector pre-header and 7603 // middle block. The vector loop is created during VPlan execution. 
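// Roughly (illustrative sketch of the generated CFG, block names informal):
//   iteration-count / SCEV / memory runtime checks
//     -> vector.ph -> [ vector loop, emitted later by VPlan execution ]
//     -> middle.block -> scalar preheader -> scalar loop -> exit
// with bypass edges from failed checks directly to the scalar preheader.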
7604 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7605 Value *CanonicalIVStartValue;
7606 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7607 ILV.createVectorizedLoopSkeleton();
7608 ILV.collectPoisonGeneratingRecipes(State);
7609
7610 ILV.printDebugTracesAtStart();
7611
7612 //===------------------------------------------------===//
7613 //
7614 // Notice: any optimization or new instruction that goes
7615 // into the code below should also be implemented in
7616 // the cost-model.
7617 //
7618 //===------------------------------------------------===//
7619
7620 // 2. Copy and widen instructions from the old loop into the new loop.
7621 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7622 ILV.getOrCreateVectorTripCount(nullptr),
7623 CanonicalIVStartValue, State);
7624 BestVPlan.execute(&State);
7625
7626 // Keep all loop hints from the original loop on the vector loop (we'll
7627 // replace the vectorizer-specific hints below).
7628 MDNode *OrigLoopID = OrigLoop->getLoopID();
7629
7630 Optional<MDNode *> VectorizedLoopID =
7631 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7632 LLVMLoopVectorizeFollowupVectorized});
7633
7634 Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7635 if (VectorizedLoopID.hasValue())
7636 L->setLoopID(VectorizedLoopID.getValue());
7637 else {
7638 // Keep all loop hints from the original loop on the vector loop (we'll
7639 // replace the vectorizer-specific hints below).
7640 if (MDNode *LID = OrigLoop->getLoopID())
7641 L->setLoopID(LID);
7642
7643 LoopVectorizeHints Hints(L, true, *ORE);
7644 Hints.setAlreadyVectorized();
7645 }
7646 // Disable runtime unrolling when vectorizing the epilogue loop.
7647 if (CanonicalIVStartValue)
7648 AddRuntimeUnrollDisableMetaData(L);
7649
7650 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7651 // predication, updating analyses.
7652 ILV.fixVectorizedLoop(State);
7653
7654 ILV.printDebugTracesAtEnd();
7655 }
7656
7657 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7658 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7659 for (const auto &Plan : VPlans)
7660 if (PrintVPlansInDotFormat)
7661 Plan->printDOT(O);
7662 else
7663 Plan->print(O);
7664 }
7665 #endif
7666
7667 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7668 SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7669
7670 // We create new control-flow for the vectorized loop, so the original exit
7671 // conditions will be dead after vectorization if they are only used by the
7672 // terminator.
7673 SmallVector<BasicBlock*> ExitingBlocks;
7674 OrigLoop->getExitingBlocks(ExitingBlocks);
7675 for (auto *BB : ExitingBlocks) {
7676 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7677 if (!Cmp || !Cmp->hasOneUse())
7678 continue;
7679
7680 // TODO: we should introduce a getUniqueExitingBlocks on Loop
7681 if (!DeadInstructions.insert(Cmp).second)
7682 continue;
7683
7684 // An operand of the icmp is often a dead trunc, used by IndUpdate.
7685 // TODO: can recurse through operands in general
7686 for (Value *Op : Cmp->operands()) {
7687 if (isa<TruncInst>(Op) && Op->hasOneUse())
7688 DeadInstructions.insert(cast<Instruction>(Op));
7689 }
7690 }
7691
7692 // We create new "steps" for induction variable updates to which the original
7693 // induction variables map. An original update instruction will be dead if
7694 // all its users except the induction variable are dead.
7695 auto *Latch = OrigLoop->getLoopLatch(); 7696 for (auto &Induction : Legal->getInductionVars()) { 7697 PHINode *Ind = Induction.first; 7698 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7699 7700 // If the tail is to be folded by masking, the primary induction variable, 7701 // if exists, isn't dead: it will be used for masking. Don't kill it. 7702 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7703 continue; 7704 7705 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7706 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7707 })) 7708 DeadInstructions.insert(IndUpdate); 7709 } 7710 } 7711 7712 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7713 7714 //===--------------------------------------------------------------------===// 7715 // EpilogueVectorizerMainLoop 7716 //===--------------------------------------------------------------------===// 7717 7718 /// This function is partially responsible for generating the control flow 7719 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7720 std::pair<BasicBlock *, Value *> 7721 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7722 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7723 7724 // Workaround! Compute the trip count of the original loop and cache it 7725 // before we start modifying the CFG. This code has a systemic problem 7726 // wherein it tries to run analysis over partially constructed IR; this is 7727 // wrong, and not simply for SCEV. The trip count of the original loop 7728 // simply happens to be prone to hitting this in practice. In theory, we 7729 // can hit the same issue for any SCEV, or ValueTracking query done during 7730 // mutation. See PR49900. 7731 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 7732 createVectorLoopSkeleton(""); 7733 7734 // Generate the code to check the minimum iteration count of the vector 7735 // epilogue (see below). 7736 EPI.EpilogueIterationCountCheck = 7737 emitMinimumIterationCountCheck(LoopScalarPreHeader, true); 7738 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7739 7740 // Generate the code to check any assumptions that we've made for SCEV 7741 // expressions. 7742 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7743 7744 // Generate the code that checks at runtime if arrays overlap. We put the 7745 // checks into a separate block to make the more common case of few elements 7746 // faster. 7747 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7748 7749 // Generate the iteration count check for the main loop, *after* the check 7750 // for the epilogue loop, so that the path-length is shorter for the case 7751 // that goes directly through the vector epilogue. The longer-path length for 7752 // the main loop is compensated for, by the gain from vectorizing the larger 7753 // trip count. Note: the branch will get updated later on when we vectorize 7754 // the epilogue. 7755 EPI.MainLoopIterationCountCheck = 7756 emitMinimumIterationCountCheck(LoopScalarPreHeader, false); 7757 7758 // Generate the induction variable. 7759 Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader); 7760 EPI.VectorTripCount = CountRoundDown; 7761 7762 // Skip induction resume value creation here because they will be created in 7763 // the second pass. 
If we created them here, they wouldn't be used anyway, 7764 // because the vplan in the second pass still contains the inductions from the 7765 // original loop. 7766 7767 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7768 } 7769 7770 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7771 LLVM_DEBUG({ 7772 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7773 << "Main Loop VF:" << EPI.MainLoopVF 7774 << ", Main Loop UF:" << EPI.MainLoopUF 7775 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7776 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7777 }); 7778 } 7779 7780 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7781 DEBUG_WITH_TYPE(VerboseDebug, { 7782 dbgs() << "intermediate fn:\n" 7783 << *OrigLoop->getHeader()->getParent() << "\n"; 7784 }); 7785 } 7786 7787 BasicBlock * 7788 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass, 7789 bool ForEpilogue) { 7790 assert(Bypass && "Expected valid bypass basic block."); 7791 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 7792 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7793 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7794 // Reuse existing vector loop preheader for TC checks. 7795 // Note that new preheader block is generated for vector loop. 7796 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7797 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7798 7799 // Generate code to check if the loop's trip count is less than VF * UF of the 7800 // main vector loop. 7801 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7802 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7803 7804 Value *CheckMinIters = Builder.CreateICmp( 7805 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7806 "min.iters.check"); 7807 7808 if (!ForEpilogue) 7809 TCCheckBlock->setName("vector.main.loop.iter.check"); 7810 7811 // Create new preheader for vector loop. 7812 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7813 DT, LI, nullptr, "vector.ph"); 7814 7815 if (ForEpilogue) { 7816 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7817 DT->getNode(Bypass)->getIDom()) && 7818 "TC check is expected to dominate Bypass"); 7819 7820 // Update dominator for Bypass & LoopExit. 7821 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7822 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7823 // For loops with multiple exits, there's no edge from the middle block 7824 // to exit blocks (as the epilogue must run) and thus no need to update 7825 // the immediate dominator of the exit blocks. 7826 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7827 7828 LoopBypassBlocks.push_back(TCCheckBlock); 7829 7830 // Save the trip count so we don't have to regenerate it in the 7831 // vec.epilog.iter.check. This is safe to do because the trip count 7832 // generated here dominates the vector epilog iter check. 
7833 EPI.TripCount = Count; 7834 } 7835 7836 ReplaceInstWithInst( 7837 TCCheckBlock->getTerminator(), 7838 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7839 7840 return TCCheckBlock; 7841 } 7842 7843 //===--------------------------------------------------------------------===// 7844 // EpilogueVectorizerEpilogueLoop 7845 //===--------------------------------------------------------------------===// 7846 7847 /// This function is partially responsible for generating the control flow 7848 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7849 std::pair<BasicBlock *, Value *> 7850 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7851 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7852 createVectorLoopSkeleton("vec.epilog."); 7853 7854 // Now, compare the remaining count and if there aren't enough iterations to 7855 // execute the vectorized epilogue skip to the scalar part. 7856 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7857 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7858 LoopVectorPreHeader = 7859 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7860 LI, nullptr, "vec.epilog.ph"); 7861 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7862 VecEpilogueIterationCountCheck); 7863 7864 // Adjust the control flow taking the state info from the main loop 7865 // vectorization into account. 7866 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7867 "expected this to be saved from the previous pass."); 7868 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7869 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7870 7871 DT->changeImmediateDominator(LoopVectorPreHeader, 7872 EPI.MainLoopIterationCountCheck); 7873 7874 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7875 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7876 7877 if (EPI.SCEVSafetyCheck) 7878 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7879 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7880 if (EPI.MemSafetyCheck) 7881 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7882 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7883 7884 DT->changeImmediateDominator( 7885 VecEpilogueIterationCountCheck, 7886 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7887 7888 DT->changeImmediateDominator(LoopScalarPreHeader, 7889 EPI.EpilogueIterationCountCheck); 7890 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7891 // If there is an epilogue which must run, there's no edge from the 7892 // middle block to exit blocks and thus no need to update the immediate 7893 // dominator of the exit blocks. 7894 DT->changeImmediateDominator(LoopExitBlock, 7895 EPI.EpilogueIterationCountCheck); 7896 7897 // Keep track of bypass blocks, as they feed start values to the induction 7898 // phis in the scalar loop preheader. 7899 if (EPI.SCEVSafetyCheck) 7900 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7901 if (EPI.MemSafetyCheck) 7902 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7903 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7904 7905 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7906 // merge control-flow from the latch block and the middle block. Update the 7907 // incoming values here and move the Phi into the preheader. 
7908 SmallVector<PHINode *, 4> PhisInBlock;
7909 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7910 PhisInBlock.push_back(&Phi);
7911
7912 for (PHINode *Phi : PhisInBlock) {
7913 Phi->replaceIncomingBlockWith(
7914 VecEpilogueIterationCountCheck->getSinglePredecessor(),
7915 VecEpilogueIterationCountCheck);
7916 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7917 if (EPI.SCEVSafetyCheck)
7918 Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
7919 if (EPI.MemSafetyCheck)
7920 Phi->removeIncomingValue(EPI.MemSafetyCheck);
7921 Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
7922 }
7923
7924 // Generate a resume induction for the vector epilogue and put it in the
7925 // vector epilogue preheader.
7926 Type *IdxTy = Legal->getWidestInductionType();
7927 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
7928 LoopVectorPreHeader->getFirstNonPHI());
7929 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
7930 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7931 EPI.MainLoopIterationCountCheck);
7932
7933 // Generate induction resume values. These variables save the new starting
7934 // indexes for the scalar loop. They are used to test if there are any tail
7935 // iterations left once the vector loop has completed.
7936 // Note that when the vectorized epilogue is skipped due to the iteration
7937 // count check, the resume value for the induction variable comes from
7938 // the trip count of the main vector loop, hence passing the AdditionalBypass
7939 // argument.
7940 createInductionResumeValues({VecEpilogueIterationCountCheck,
7941 EPI.VectorTripCount} /* AdditionalBypass */);
7942
7943 return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7944 }
7945
7946 BasicBlock *
7947 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7948 BasicBlock *Bypass, BasicBlock *Insert) {
7949
7950 assert(EPI.TripCount &&
7951 "Expected trip count to have been saved in the first pass.");
7952 assert(
7953 (!isa<Instruction>(EPI.TripCount) ||
7954 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7955 "saved trip count does not dominate insertion point.");
7956 Value *TC = EPI.TripCount;
7957 IRBuilder<> Builder(Insert->getTerminator());
7958 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7959
7960 // Generate code to check if the loop's trip count is less than VF * UF of the
7961 // vector epilogue loop.
7962 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7963 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7964 7965 Value *CheckMinIters = 7966 Builder.CreateICmp(P, Count, 7967 createStepForVF(Builder, Count->getType(), 7968 EPI.EpilogueVF, EPI.EpilogueUF), 7969 "min.epilog.iters.check"); 7970 7971 ReplaceInstWithInst( 7972 Insert->getTerminator(), 7973 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7974 7975 LoopBypassBlocks.push_back(Insert); 7976 return Insert; 7977 } 7978 7979 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 7980 LLVM_DEBUG({ 7981 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 7982 << "Epilogue Loop VF:" << EPI.EpilogueVF 7983 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7984 }); 7985 } 7986 7987 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 7988 DEBUG_WITH_TYPE(VerboseDebug, { 7989 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 7990 }); 7991 } 7992 7993 bool LoopVectorizationPlanner::getDecisionAndClampRange( 7994 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 7995 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 7996 bool PredicateAtRangeStart = Predicate(Range.Start); 7997 7998 for (ElementCount TmpVF = Range.Start * 2; 7999 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8000 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8001 Range.End = TmpVF; 8002 break; 8003 } 8004 8005 return PredicateAtRangeStart; 8006 } 8007 8008 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8009 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8010 /// of VF's starting at a given VF and extending it as much as possible. Each 8011 /// vectorization decision can potentially shorten this sub-range during 8012 /// buildVPlan(). 8013 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8014 ElementCount MaxVF) { 8015 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8016 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8017 VFRange SubRange = {VF, MaxVFPlusOne}; 8018 VPlans.push_back(buildVPlan(SubRange)); 8019 VF = SubRange.End; 8020 } 8021 } 8022 8023 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8024 VPlanPtr &Plan) { 8025 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8026 8027 // Look for cached value. 8028 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8029 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8030 if (ECEntryIt != EdgeMaskCache.end()) 8031 return ECEntryIt->second; 8032 8033 VPValue *SrcMask = createBlockInMask(Src, Plan); 8034 8035 // The terminator has to be a branch inst! 8036 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8037 assert(BI && "Unexpected terminator found"); 8038 8039 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8040 return EdgeMaskCache[Edge] = SrcMask; 8041 8042 // If source is an exiting block, we know the exit edge is dynamically dead 8043 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8044 // adding uses of an otherwise potentially dead instruction. 
8045 if (OrigLoop->isLoopExiting(Src)) 8046 return EdgeMaskCache[Edge] = SrcMask; 8047 8048 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8049 assert(EdgeMask && "No Edge Mask found for condition"); 8050 8051 if (BI->getSuccessor(0) != Dst) 8052 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8053 8054 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8055 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8056 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8057 // The select version does not introduce new UB if SrcMask is false and 8058 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8059 VPValue *False = Plan->getOrAddVPValue( 8060 ConstantInt::getFalse(BI->getCondition()->getType())); 8061 EdgeMask = 8062 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8063 } 8064 8065 return EdgeMaskCache[Edge] = EdgeMask; 8066 } 8067 8068 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8069 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8070 8071 // Look for cached value. 8072 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8073 if (BCEntryIt != BlockMaskCache.end()) 8074 return BCEntryIt->second; 8075 8076 // All-one mask is modelled as no-mask following the convention for masked 8077 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8078 VPValue *BlockMask = nullptr; 8079 8080 if (OrigLoop->getHeader() == BB) { 8081 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8082 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8083 8084 // Introduce the early-exit compare IV <= BTC to form header block mask. 8085 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8086 // constructing the desired canonical IV in the header block as its first 8087 // non-phi instructions. 8088 assert(CM.foldTailByMasking() && "must fold the tail"); 8089 VPBasicBlock *HeaderVPBB = 8090 Plan->getVectorLoopRegion()->getEntryBasicBlock(); 8091 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8092 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8093 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8094 8095 VPBuilder::InsertPointGuard Guard(Builder); 8096 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8097 if (CM.TTI.emitGetActiveLaneMask()) { 8098 VPValue *TC = Plan->getOrCreateTripCount(); 8099 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8100 } else { 8101 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8102 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8103 } 8104 return BlockMaskCache[BB] = BlockMask; 8105 } 8106 8107 // This is the block mask. We OR all incoming edges. 8108 for (auto *Predecessor : predecessors(BB)) { 8109 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8110 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8111 return BlockMaskCache[BB] = EdgeMask; 8112 8113 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8114 BlockMask = EdgeMask; 8115 continue; 8116 } 8117 8118 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8119 } 8120 8121 return BlockMaskCache[BB] = BlockMask; 8122 } 8123 8124 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8125 ArrayRef<VPValue *> Operands, 8126 VFRange &Range, 8127 VPlanPtr &Plan) { 8128 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8129 "Must be called with either a load or store"); 8130 8131 auto willWiden = [&](ElementCount VF) -> bool { 8132 if (VF.isScalar()) 8133 return false; 8134 LoopVectorizationCostModel::InstWidening Decision = 8135 CM.getWideningDecision(I, VF); 8136 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8137 "CM decision should be taken at this point."); 8138 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8139 return true; 8140 if (CM.isScalarAfterVectorization(I, VF) || 8141 CM.isProfitableToScalarize(I, VF)) 8142 return false; 8143 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8144 }; 8145 8146 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8147 return nullptr; 8148 8149 VPValue *Mask = nullptr; 8150 if (Legal->isMaskRequired(I)) 8151 Mask = createBlockInMask(I->getParent(), Plan); 8152 8153 // Determine if the pointer operand of the access is either consecutive or 8154 // reverse consecutive. 8155 LoopVectorizationCostModel::InstWidening Decision = 8156 CM.getWideningDecision(I, Range.Start); 8157 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8158 bool Consecutive = 8159 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8160 8161 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8162 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8163 Consecutive, Reverse); 8164 8165 StoreInst *Store = cast<StoreInst>(I); 8166 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8167 Mask, Consecutive, Reverse); 8168 } 8169 8170 static VPWidenIntOrFpInductionRecipe * 8171 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8172 VPValue *Start, const InductionDescriptor &IndDesc, 8173 LoopVectorizationCostModel &CM, ScalarEvolution &SE, 8174 Loop &OrigLoop, VFRange &Range) { 8175 // Returns true if an instruction \p I should be scalarized instead of 8176 // vectorized for the chosen vectorization factor. 8177 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8178 return CM.isScalarAfterVectorization(I, VF) || 8179 CM.isProfitableToScalarize(I, VF); 8180 }; 8181 8182 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8183 [&](ElementCount VF) { 8184 // Returns true if we should generate a scalar version of \p IV. 
8185 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8186 return true; 8187 auto isScalarInst = [&](User *U) -> bool { 8188 auto *I = cast<Instruction>(U); 8189 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8190 }; 8191 return any_of(PhiOrTrunc->users(), isScalarInst); 8192 }, 8193 Range); 8194 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8195 [&](ElementCount VF) { 8196 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8197 }, 8198 Range); 8199 assert(IndDesc.getStartValue() == 8200 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8201 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8202 "step must be loop invariant"); 8203 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8204 return new VPWidenIntOrFpInductionRecipe( 8205 Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE); 8206 } 8207 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8208 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8209 !NeedsScalarIVOnly, SE); 8210 } 8211 8212 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8213 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8214 8215 // Check if this is an integer or fp induction. If so, build the recipe that 8216 // produces its scalar and vector values. 8217 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8218 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, 8219 *PSE.getSE(), *OrigLoop, Range); 8220 8221 // Check if this is pointer induction. If so, build the recipe for it. 8222 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8223 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8224 *PSE.getSE()); 8225 return nullptr; 8226 } 8227 8228 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8229 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8230 VPlan &Plan) const { 8231 // Optimize the special case where the source is a constant integer 8232 // induction variable. Notice that we can only optimize the 'trunc' case 8233 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8234 // (c) other casts depend on pointer size. 8235 8236 // Determine whether \p K is a truncation based on an induction variable that 8237 // can be optimized. 8238 auto isOptimizableIVTruncate = 8239 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8240 return [=](ElementCount VF) -> bool { 8241 return CM.isOptimizableIVTruncate(K, VF); 8242 }; 8243 }; 8244 8245 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8246 isOptimizableIVTruncate(I), Range)) { 8247 8248 auto *Phi = cast<PHINode>(I->getOperand(0)); 8249 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8250 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8251 return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(), 8252 *OrigLoop, Range); 8253 } 8254 return nullptr; 8255 } 8256 8257 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8258 ArrayRef<VPValue *> Operands, 8259 VPlanPtr &Plan) { 8260 // If all incoming values are equal, the incoming VPValue can be used directly 8261 // instead of creating a new VPBlendRecipe. 
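// Illustrative example: a phi such as
//   %p = phi i32 [ %x, %bb1 ], [ %x, %bb2 ]
// sees the same incoming VPValue on every edge, so %x is returned directly
// and no VPBlendRecipe (and thus no select) needs to be generated.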
8262 VPValue *FirstIncoming = Operands[0]; 8263 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8264 return FirstIncoming == Inc; 8265 })) { 8266 return Operands[0]; 8267 } 8268 8269 unsigned NumIncoming = Phi->getNumIncomingValues(); 8270 // For in-loop reductions, we do not need to create an additional select. 8271 VPValue *InLoopVal = nullptr; 8272 for (unsigned In = 0; In < NumIncoming; In++) { 8273 PHINode *PhiOp = 8274 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8275 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8276 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8277 InLoopVal = Operands[In]; 8278 } 8279 } 8280 8281 assert((!InLoopVal || NumIncoming == 2) && 8282 "Found an in-loop reduction for PHI with unexpected number of " 8283 "incoming values"); 8284 if (InLoopVal) 8285 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8286 8287 // We know that all PHIs in non-header blocks are converted into selects, so 8288 // we don't have to worry about the insertion order and we can just use the 8289 // builder. At this point we generate the predication tree. There may be 8290 // duplications since this is a simple recursive scan, but future 8291 // optimizations will clean it up. 8292 SmallVector<VPValue *, 2> OperandsWithMask; 8293 8294 for (unsigned In = 0; In < NumIncoming; In++) { 8295 VPValue *EdgeMask = 8296 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8297 assert((EdgeMask || NumIncoming == 1) && 8298 "Multiple predecessors with one having a full mask"); 8299 OperandsWithMask.push_back(Operands[In]); 8300 if (EdgeMask) 8301 OperandsWithMask.push_back(EdgeMask); 8302 } 8303 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8304 } 8305 8306 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8307 ArrayRef<VPValue *> Operands, 8308 VFRange &Range) const { 8309 8310 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8311 [this, CI](ElementCount VF) { 8312 return CM.isScalarWithPredication(CI, VF); 8313 }, 8314 Range); 8315 8316 if (IsPredicated) 8317 return nullptr; 8318 8319 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8320 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8321 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8322 ID == Intrinsic::pseudoprobe || 8323 ID == Intrinsic::experimental_noalias_scope_decl)) 8324 return nullptr; 8325 8326 auto willWiden = [&](ElementCount VF) -> bool { 8327 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8328 // The following case may be scalarized depending on the VF. 8329 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8330 // version of the instruction. 8331 // Is it beneficial to perform intrinsic call compared to lib call? 8332 bool NeedToScalarize = false; 8333 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8334 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8335 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8336 return UseVectorIntrinsic || !NeedToScalarize; 8337 }; 8338 8339 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8340 return nullptr; 8341 8342 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8343 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8344 } 8345 8346 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8347 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8348 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8349 // Instruction should be widened, unless it is scalar after vectorization, 8350 // scalarization is profitable or it is predicated. 8351 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8352 return CM.isScalarAfterVectorization(I, VF) || 8353 CM.isProfitableToScalarize(I, VF) || 8354 CM.isScalarWithPredication(I, VF); 8355 }; 8356 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8357 Range); 8358 } 8359 8360 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8361 ArrayRef<VPValue *> Operands) const { 8362 auto IsVectorizableOpcode = [](unsigned Opcode) { 8363 switch (Opcode) { 8364 case Instruction::Add: 8365 case Instruction::And: 8366 case Instruction::AShr: 8367 case Instruction::BitCast: 8368 case Instruction::FAdd: 8369 case Instruction::FCmp: 8370 case Instruction::FDiv: 8371 case Instruction::FMul: 8372 case Instruction::FNeg: 8373 case Instruction::FPExt: 8374 case Instruction::FPToSI: 8375 case Instruction::FPToUI: 8376 case Instruction::FPTrunc: 8377 case Instruction::FRem: 8378 case Instruction::FSub: 8379 case Instruction::ICmp: 8380 case Instruction::IntToPtr: 8381 case Instruction::LShr: 8382 case Instruction::Mul: 8383 case Instruction::Or: 8384 case Instruction::PtrToInt: 8385 case Instruction::SDiv: 8386 case Instruction::Select: 8387 case Instruction::SExt: 8388 case Instruction::Shl: 8389 case Instruction::SIToFP: 8390 case Instruction::SRem: 8391 case Instruction::Sub: 8392 case Instruction::Trunc: 8393 case Instruction::UDiv: 8394 case Instruction::UIToFP: 8395 case Instruction::URem: 8396 case Instruction::Xor: 8397 case Instruction::ZExt: 8398 return true; 8399 } 8400 return false; 8401 }; 8402 8403 if (!IsVectorizableOpcode(I->getOpcode())) 8404 return nullptr; 8405 8406 // Success: widen this instruction. 
  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
}

void VPRecipeBuilder::fixHeaderPhis() {
  BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  for (VPHeaderPHIRecipe *R : PhisToFix) {
    auto *PN = cast<PHINode>(R->getUnderlyingValue());
    VPRecipeBase *IncR =
        getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
    R->addOperand(IncR->getVPSingleValue());
  }
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
      Range);

  // Even if the instruction is not marked as uniform, there are certain
  // intrinsic calls that can be effectively treated as such, so we check for
  // them here. Conservatively, we only do this for scalable vectors, since
  // for fixed-width VFs we can always fall back on full scalarization.
  if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
    switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
    case Intrinsic::assume:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // For scalable vectors, if one of the operands is variant then we still
      // want to mark as uniform, which will generate one instruction for just
      // the first lane of the vector. We can't scalarize the call in the same
      // way as for fixed-width vectors because we don't know how many lanes
      // there are.
      //
      // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic, generating the instruction for the
      //      first lane is still better than not generating any at all. For
      //      example, the input may be a splat across all lanes.
      //   2. For the lifetime start/end intrinsics the pointer operand only
      //      does anything useful when the input comes from a stack object,
      //      which suggests it should always be uniform. For non-stack objects
      //      the effect is to poison the object, which still allows us to
      //      remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
8482 if (!IsPredicated) { 8483 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8484 VPBB->appendRecipe(Recipe); 8485 return VPBB; 8486 } 8487 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8488 8489 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8490 assert(SingleSucc && "VPBB must have a single successor when handling " 8491 "predicated replication."); 8492 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8493 // Record predicated instructions for above packing optimizations. 8494 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8495 VPBlockUtils::insertBlockAfter(Region, VPBB); 8496 auto *RegSucc = new VPBasicBlock(); 8497 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8498 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8499 return RegSucc; 8500 } 8501 8502 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8503 VPRecipeBase *PredRecipe, 8504 VPlanPtr &Plan) { 8505 // Instructions marked for predication are replicated and placed under an 8506 // if-then construct to prevent side-effects. 8507 8508 // Generate recipes to compute the block mask for this region. 8509 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8510 8511 // Build the triangular if-then region. 8512 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8513 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8514 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8515 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8516 auto *PHIRecipe = Instr->getType()->isVoidTy() 8517 ? nullptr 8518 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8519 if (PHIRecipe) { 8520 Plan->removeVPValueFor(Instr); 8521 Plan->addVPValue(Instr, PHIRecipe); 8522 } 8523 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8524 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8525 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8526 8527 // Note: first set Entry as region entry and then connect successors starting 8528 // from it in order, to propagate the "parent" of each VPBasicBlock. 8529 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8530 VPBlockUtils::connectBlocks(Pred, Exit); 8531 8532 return Region; 8533 } 8534 8535 VPRecipeOrVPValueTy 8536 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8537 ArrayRef<VPValue *> Operands, 8538 VFRange &Range, VPlanPtr &Plan) { 8539 // First, check for specific widening recipes that deal with calls, memory 8540 // operations, inductions and Phi nodes. 
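  // Anything not matched by these falls through to shouldWiden/tryToWiden
  // below; if that also fails, the caller resorts to replication via
  // handleReplication.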
8541 if (auto *CI = dyn_cast<CallInst>(Instr)) 8542 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8543 8544 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8545 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8546 8547 VPRecipeBase *Recipe; 8548 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8549 if (Phi->getParent() != OrigLoop->getHeader()) 8550 return tryToBlend(Phi, Operands, Plan); 8551 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8552 return toVPRecipeResult(Recipe); 8553 8554 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8555 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8556 VPValue *StartV = Operands[0]; 8557 if (Legal->isReductionVariable(Phi)) { 8558 const RecurrenceDescriptor &RdxDesc = 8559 Legal->getReductionVars().find(Phi)->second; 8560 assert(RdxDesc.getRecurrenceStartValue() == 8561 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8562 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8563 CM.isInLoopReduction(Phi), 8564 CM.useOrderedReductions(RdxDesc)); 8565 } else { 8566 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8567 } 8568 8569 // Record the incoming value from the backedge, so we can add the incoming 8570 // value from the backedge after all recipes have been created. 8571 recordRecipeOf(cast<Instruction>( 8572 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8573 PhisToFix.push_back(PhiRecipe); 8574 } else { 8575 // TODO: record backedge value for remaining pointer induction phis. 8576 assert(Phi->getType()->isPointerTy() && 8577 "only pointer phis should be handled here"); 8578 assert(Legal->getInductionVars().count(Phi) && 8579 "Not an induction variable"); 8580 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8581 VPValue *Start = Plan->getOrAddVPValue(II.getStartValue()); 8582 PhiRecipe = new VPWidenPHIRecipe(Phi, Start); 8583 } 8584 8585 return toVPRecipeResult(PhiRecipe); 8586 } 8587 8588 if (isa<TruncInst>(Instr) && 8589 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8590 Range, *Plan))) 8591 return toVPRecipeResult(Recipe); 8592 8593 if (!shouldWiden(Instr, Range)) 8594 return nullptr; 8595 8596 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8597 return toVPRecipeResult(new VPWidenGEPRecipe( 8598 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8599 8600 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8601 bool InvariantCond = 8602 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8603 return toVPRecipeResult(new VPWidenSelectRecipe( 8604 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8605 } 8606 8607 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8608 } 8609 8610 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8611 ElementCount MaxVF) { 8612 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8613 8614 // Collect instructions from the original loop that will become trivially dead 8615 // in the vectorized loop. We don't need to vectorize these instructions. For 8616 // example, original induction update instructions can become dead because we 8617 // separately emit induction "steps" when generating code for the new loop. 8618 // Similarly, we create a new latch condition when setting up the structure 8619 // of the new loop, so the old one can become dead. 
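  // For example, with an induction %iv and its update %iv.next = add %iv, 1
  // feeding only the phi and the old latch compare, both the update and the
  // compare can be dropped once the vector loop emits its own induction steps
  // and latch branch.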
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
  for (auto &P : Legal->getSinkAfter()) {
    Instruction *SinkTarget = P.second;
    Instruction *FirstInst = &*SinkTarget->getParent()->begin();
    (void)FirstInst;
    while (DeadInstructions.contains(SinkTarget)) {
      assert(
          SinkTarget != FirstInst &&
          "Must find a live instruction (at least the one feeding the "
          "first-order recurrence PHI) before reaching beginning of the block");
      SinkTarget = SinkTarget->getPrevNode();
      assert(SinkTarget != P.first &&
             "sink source equals target, no sinking required");
    }
    P.second = SinkTarget;
  }

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

// Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
// CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
// BranchOnCount VPInstruction to the latch.
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
                                  bool HasNUW, bool IsVPlanNative) {
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  auto *StartV = Plan.getOrAddVPValue(StartIdx);

  auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
  Header->insert(CanonicalIVPHI, Header->begin());

  auto *CanonicalIVIncrement =
      new VPInstruction(HasNUW ?
VPInstruction::CanonicalIVIncrementNUW 8677 : VPInstruction::CanonicalIVIncrement, 8678 {CanonicalIVPHI}, DL); 8679 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8680 8681 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8682 if (IsVPlanNative) 8683 EB->setCondBit(nullptr); 8684 EB->appendRecipe(CanonicalIVIncrement); 8685 8686 auto *BranchOnCount = 8687 new VPInstruction(VPInstruction::BranchOnCount, 8688 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8689 EB->appendRecipe(BranchOnCount); 8690 } 8691 8692 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8693 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8694 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8695 8696 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8697 8698 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8699 8700 // --------------------------------------------------------------------------- 8701 // Pre-construction: record ingredients whose recipes we'll need to further 8702 // process after constructing the initial VPlan. 8703 // --------------------------------------------------------------------------- 8704 8705 // Mark instructions we'll need to sink later and their targets as 8706 // ingredients whose recipe we'll need to record. 8707 for (auto &Entry : SinkAfter) { 8708 RecipeBuilder.recordRecipeOf(Entry.first); 8709 RecipeBuilder.recordRecipeOf(Entry.second); 8710 } 8711 for (auto &Reduction : CM.getInLoopReductionChains()) { 8712 PHINode *Phi = Reduction.first; 8713 RecurKind Kind = 8714 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8715 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8716 8717 RecipeBuilder.recordRecipeOf(Phi); 8718 for (auto &R : ReductionOperations) { 8719 RecipeBuilder.recordRecipeOf(R); 8720 // For min/max reductions, where we have a pair of icmp/select, we also 8721 // need to record the ICmp recipe, so it can be removed later. 8722 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8723 "Only min/max recurrences allowed for inloop reductions"); 8724 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8725 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8726 } 8727 } 8728 8729 // For each interleave group which is relevant for this (possibly trimmed) 8730 // Range, add it to the set of groups to be later applied to the VPlan and add 8731 // placeholders for its members' Recipes which we'll be replacing with a 8732 // single VPInterleaveRecipe. 8733 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8734 auto applyIG = [IG, this](ElementCount VF) -> bool { 8735 return (VF.isVector() && // Query is illegal for VF == 1 8736 CM.getWideningDecision(IG->getInsertPos(), VF) == 8737 LoopVectorizationCostModel::CM_Interleave); 8738 }; 8739 if (!getDecisionAndClampRange(applyIG, Range)) 8740 continue; 8741 InterleaveGroups.insert(IG); 8742 for (unsigned i = 0; i < IG->getFactor(); i++) 8743 if (Instruction *Member = IG->getMember(i)) 8744 RecipeBuilder.recordRecipeOf(Member); 8745 }; 8746 8747 // --------------------------------------------------------------------------- 8748 // Build initial VPlan: Scan the body of the loop in a topological order to 8749 // visit each basic block after having visited its predecessor basic blocks. 
8750 // --------------------------------------------------------------------------- 8751 8752 // Create initial VPlan skeleton, starting with a block for the pre-header, 8753 // followed by a region for the vector loop. The skeleton vector loop region 8754 // contains a header and latch block. 8755 VPBasicBlock *Preheader = new VPBasicBlock("vector.ph"); 8756 auto Plan = std::make_unique<VPlan>(Preheader); 8757 8758 VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body"); 8759 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch"); 8760 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB); 8761 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop"); 8762 VPBlockUtils::insertBlockAfter(TopRegion, Preheader); 8763 8764 Instruction *DLInst = 8765 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 8766 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), 8767 DLInst ? DLInst->getDebugLoc() : DebugLoc(), 8768 !CM.foldTailByMasking(), false); 8769 8770 // Scan the body of the loop in a topological order to visit each basic block 8771 // after having visited its predecessor basic blocks. 8772 LoopBlocksDFS DFS(OrigLoop); 8773 DFS.perform(LI); 8774 8775 VPBasicBlock *VPBB = HeaderVPBB; 8776 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 8777 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8778 // Relevant instructions from basic block BB will be grouped into VPRecipe 8779 // ingredients and fill a new VPBasicBlock. 8780 unsigned VPBBsForBB = 0; 8781 if (VPBB != HeaderVPBB) 8782 VPBB->setName(BB->getName()); 8783 Builder.setInsertPoint(VPBB); 8784 8785 // Introduce each ingredient into VPlan. 8786 // TODO: Model and preserve debug instrinsics in VPlan. 8787 for (Instruction &I : BB->instructionsWithoutDebug()) { 8788 Instruction *Instr = &I; 8789 8790 // First filter out irrelevant instructions, to ensure no recipes are 8791 // built for them. 8792 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8793 continue; 8794 8795 SmallVector<VPValue *, 4> Operands; 8796 auto *Phi = dyn_cast<PHINode>(Instr); 8797 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8798 Operands.push_back(Plan->getOrAddVPValue( 8799 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8800 } else { 8801 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8802 Operands = {OpRange.begin(), OpRange.end()}; 8803 } 8804 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8805 Instr, Operands, Range, Plan)) { 8806 // If Instr can be simplified to an existing VPValue, use it. 8807 if (RecipeOrValue.is<VPValue *>()) { 8808 auto *VPV = RecipeOrValue.get<VPValue *>(); 8809 Plan->addVPValue(Instr, VPV); 8810 // If the re-used value is a recipe, register the recipe for the 8811 // instruction, in case the recipe for Instr needs to be recorded. 8812 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8813 RecipeBuilder.setRecipe(Instr, R); 8814 continue; 8815 } 8816 // Otherwise, add the new recipe. 8817 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8818 for (auto *Def : Recipe->definedValues()) { 8819 auto *UV = Def->getUnderlyingValue(); 8820 Plan->addVPValue(UV, Def); 8821 } 8822 8823 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8824 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8825 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8826 // of the header block. That can happen for truncates of induction 8827 // variables. 
Those recipes are moved to the phi section of the header 8828 // block after applying SinkAfter, which relies on the original 8829 // position of the trunc. 8830 assert(isa<TruncInst>(Instr)); 8831 InductionsToMove.push_back( 8832 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8833 } 8834 RecipeBuilder.setRecipe(Instr, Recipe); 8835 VPBB->appendRecipe(Recipe); 8836 continue; 8837 } 8838 8839 // Otherwise, if all widening options failed, Instruction is to be 8840 // replicated. This may create a successor for VPBB. 8841 VPBasicBlock *NextVPBB = 8842 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8843 if (NextVPBB != VPBB) { 8844 VPBB = NextVPBB; 8845 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8846 : ""); 8847 } 8848 } 8849 8850 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8851 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8852 } 8853 8854 HeaderVPBB->setName("vector.body"); 8855 8856 // Fold the last, empty block into its predecessor. 8857 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8858 assert(VPBB && "expected to fold last (empty) block"); 8859 // After here, VPBB should not be used. 8860 VPBB = nullptr; 8861 8862 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8863 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8864 "entry block must be set to a VPRegionBlock having a non-empty entry " 8865 "VPBasicBlock"); 8866 RecipeBuilder.fixHeaderPhis(); 8867 8868 // --------------------------------------------------------------------------- 8869 // Transform initial VPlan: Apply previously taken decisions, in order, to 8870 // bring the VPlan to its final state. 8871 // --------------------------------------------------------------------------- 8872 8873 // Apply Sink-After legal constraints. 8874 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8875 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8876 if (Region && Region->isReplicator()) { 8877 assert(Region->getNumSuccessors() == 1 && 8878 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8879 assert(R->getParent()->size() == 1 && 8880 "A recipe in an original replicator region must be the only " 8881 "recipe in its block"); 8882 return Region; 8883 } 8884 return nullptr; 8885 }; 8886 for (auto &Entry : SinkAfter) { 8887 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8888 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8889 8890 auto *TargetRegion = GetReplicateRegion(Target); 8891 auto *SinkRegion = GetReplicateRegion(Sink); 8892 if (!SinkRegion) { 8893 // If the sink source is not a replicate region, sink the recipe directly. 8894 if (TargetRegion) { 8895 // The target is in a replication region, make sure to move Sink to 8896 // the block after it, not into the replication region itself. 8897 VPBasicBlock *NextBlock = 8898 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8899 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8900 } else 8901 Sink->moveAfter(Target); 8902 continue; 8903 } 8904 8905 // The sink source is in a replicate region. Unhook the region from the CFG. 
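  // That is, SinkPred -> SinkRegion -> SinkSucc becomes SinkPred -> SinkSucc,
  // and the region is re-attached at its new position below.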
8906 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8907 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8908 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8909 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8910 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8911 8912 if (TargetRegion) { 8913 // The target recipe is also in a replicate region, move the sink region 8914 // after the target region. 8915 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8916 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8917 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8918 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8919 } else { 8920 // The sink source is in a replicate region, we need to move the whole 8921 // replicate region, which should only contain a single recipe in the 8922 // main block. 8923 auto *SplitBlock = 8924 Target->getParent()->splitAt(std::next(Target->getIterator())); 8925 8926 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8927 8928 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8929 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8930 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8931 } 8932 } 8933 8934 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8935 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8936 8937 // Now that sink-after is done, move induction recipes for optimized truncates 8938 // to the phi section of the header block. 8939 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8940 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8941 8942 // Adjust the recipes for any inloop reductions. 8943 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 8944 RecipeBuilder, Range.Start); 8945 8946 // Introduce a recipe to combine the incoming and previous values of a 8947 // first-order recurrence. 8948 for (VPRecipeBase &R : 8949 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8950 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8951 if (!RecurPhi) 8952 continue; 8953 8954 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8955 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8956 auto *Region = GetReplicateRegion(PrevRecipe); 8957 if (Region) 8958 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 8959 if (Region || PrevRecipe->isPhi()) 8960 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8961 else 8962 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8963 8964 auto *RecurSplice = cast<VPInstruction>( 8965 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8966 {RecurPhi, RecurPhi->getBackedgeValue()})); 8967 8968 RecurPhi->replaceAllUsesWith(RecurSplice); 8969 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8970 // all users. 8971 RecurSplice->setOperand(0, RecurPhi); 8972 } 8973 8974 // Interleave memory: for each Interleave Group we marked earlier as relevant 8975 // for this VPlan, replace the Recipes widening its memory instructions with a 8976 // single VPInterleaveRecipe at its insertion point. 
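  // For example, for a group of two loads A[2*i] and A[2*i+1], the two
  // VPWidenMemoryInstructionRecipes are replaced by one VPInterleaveRecipe
  // that will emit a single wide load plus shuffles to de-interleave the
  // lanes.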
8977 for (auto IG : InterleaveGroups) { 8978 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8979 RecipeBuilder.getRecipe(IG->getInsertPos())); 8980 SmallVector<VPValue *, 4> StoredValues; 8981 for (unsigned i = 0; i < IG->getFactor(); ++i) 8982 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 8983 auto *StoreR = 8984 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 8985 StoredValues.push_back(StoreR->getStoredValue()); 8986 } 8987 8988 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8989 Recipe->getMask()); 8990 VPIG->insertBefore(Recipe); 8991 unsigned J = 0; 8992 for (unsigned i = 0; i < IG->getFactor(); ++i) 8993 if (Instruction *Member = IG->getMember(i)) { 8994 if (!Member->getType()->isVoidTy()) { 8995 VPValue *OriginalV = Plan->getVPValue(Member); 8996 Plan->removeVPValueFor(Member); 8997 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8998 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8999 J++; 9000 } 9001 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9002 } 9003 } 9004 9005 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9006 // in ways that accessing values using original IR values is incorrect. 9007 Plan->disableValue2VPValue(); 9008 9009 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9010 VPlanTransforms::sinkScalarOperands(*Plan); 9011 VPlanTransforms::mergeReplicateRegions(*Plan); 9012 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9013 9014 std::string PlanName; 9015 raw_string_ostream RSO(PlanName); 9016 ElementCount VF = Range.Start; 9017 Plan->addVF(VF); 9018 RSO << "Initial VPlan for VF={" << VF; 9019 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9020 Plan->addVF(VF); 9021 RSO << "," << VF; 9022 } 9023 RSO << "},UF>=1"; 9024 RSO.flush(); 9025 Plan->setName(PlanName); 9026 9027 // Fold Exit block into its predecessor if possible. 9028 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9029 // VPBasicBlock as exit. 9030 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit()); 9031 9032 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9033 return Plan; 9034 } 9035 9036 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9037 // Outer loop handling: They may require CFG and instruction level 9038 // transformations before even evaluating whether vectorization is profitable. 9039 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9040 // the vectorization pipeline. 9041 assert(!OrigLoop->isInnermost()); 9042 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9043 9044 // Create new empty VPlan 9045 auto Plan = std::make_unique<VPlan>(); 9046 9047 // Build hierarchical CFG 9048 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9049 HCFGBuilder.buildHierarchicalCFG(); 9050 9051 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9052 VF *= 2) 9053 Plan->addVF(VF); 9054 9055 if (EnableVPlanPredication) { 9056 VPlanPredicator VPP(*Plan); 9057 VPP.predicate(); 9058 9059 // Avoid running transformation to recipes until masked code generation in 9060 // VPlan-native path is in place. 
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan,
      [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
      DeadInstructions, *PSE.getSE());

  // Update plan to be compatible with the inner loop vectorizer for
  // code-generation.
  VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
  VPBasicBlock *Preheader = LoopRegion->getEntryBasicBlock();
  VPBasicBlock *Exit = LoopRegion->getExitBasicBlock();
  VPBlockBase *Latch = Exit->getSinglePredecessor();
  VPBlockBase *Header = Preheader->getSingleSuccessor();

  // 1. Move preheader block out of main vector loop.
  Preheader->setParent(LoopRegion->getParent());
  VPBlockUtils::disconnectBlocks(Preheader, Header);
  VPBlockUtils::connectBlocks(Preheader, LoopRegion);
  Plan->setEntry(Preheader);

  // 2. Disconnect backedge and exit block.
  VPBlockUtils::disconnectBlocks(Latch, Header);
  VPBlockUtils::disconnectBlocks(Latch, Exit);

  // 3. Update entry and exit of main vector loop region.
  LoopRegion->setEntry(Header);
  LoopRegion->setExit(Latch);

  // 4. Remove exit block.
  delete Exit;

  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
                        true, true);
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc =
        Legal->getReductionVars().find(Phi)->second;
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      // Recognize a call to the llvm.fmuladd intrinsic.
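      // Such a call is split below into an FMul VPInstruction whose result is
      // then used as the vector operand of the fadd reduction recipe.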
9131 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9132 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9133 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9134 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9135 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9136 "Expected to replace a VPWidenSelectSC"); 9137 FirstOpId = 1; 9138 } else { 9139 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9140 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9141 "Expected to replace a VPWidenSC"); 9142 FirstOpId = 0; 9143 } 9144 unsigned VecOpId = 9145 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9146 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9147 9148 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent()) 9149 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9150 : nullptr; 9151 9152 if (IsFMulAdd) { 9153 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9154 // need to create an fmul recipe to use as the vector operand for the 9155 // fadd reduction. 9156 VPInstruction *FMulRecipe = new VPInstruction( 9157 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9158 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9159 WidenRecipe->getParent()->insert(FMulRecipe, 9160 WidenRecipe->getIterator()); 9161 VecOp = FMulRecipe; 9162 } 9163 VPReductionRecipe *RedRecipe = 9164 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9165 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9166 Plan->removeVPValueFor(R); 9167 Plan->addVPValue(R, RedRecipe); 9168 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9169 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9170 WidenRecipe->eraseFromParent(); 9171 9172 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9173 VPRecipeBase *CompareRecipe = 9174 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9175 assert(isa<VPWidenRecipe>(CompareRecipe) && 9176 "Expected to replace a VPWidenSC"); 9177 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9178 "Expected no remaining users"); 9179 CompareRecipe->eraseFromParent(); 9180 } 9181 Chain = R; 9182 } 9183 } 9184 9185 // If tail is folded by masking, introduce selects between the phi 9186 // and the live-out instruction of each reduction, at the beginning of the 9187 // dedicated latch block. 
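  // Roughly: %rdx.select = select <header mask>, <this iteration's reduction
  // value>, <reduction phi>, so masked-off lanes keep their previous value.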
9188 if (CM.foldTailByMasking()) { 9189 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9190 for (VPRecipeBase &R : 9191 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9192 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9193 if (!PhiR || PhiR->isInLoop()) 9194 continue; 9195 VPValue *Cond = 9196 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9197 VPValue *Red = PhiR->getBackedgeValue(); 9198 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9199 "reduction recipe must be defined before latch"); 9200 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9201 } 9202 } 9203 } 9204 9205 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9206 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9207 VPSlotTracker &SlotTracker) const { 9208 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9209 IG->getInsertPos()->printAsOperand(O, false); 9210 O << ", "; 9211 getAddr()->printAsOperand(O, SlotTracker); 9212 VPValue *Mask = getMask(); 9213 if (Mask) { 9214 O << ", "; 9215 Mask->printAsOperand(O, SlotTracker); 9216 } 9217 9218 unsigned OpIdx = 0; 9219 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9220 if (!IG->getMember(i)) 9221 continue; 9222 if (getNumStoreOperands() > 0) { 9223 O << "\n" << Indent << " store "; 9224 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9225 O << " to index " << i; 9226 } else { 9227 O << "\n" << Indent << " "; 9228 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9229 O << " = load from index " << i; 9230 } 9231 ++OpIdx; 9232 } 9233 } 9234 #endif 9235 9236 void VPWidenCallRecipe::execute(VPTransformState &State) { 9237 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9238 *this, State); 9239 } 9240 9241 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9242 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9243 State.ILV->setDebugLocFromInst(&I); 9244 9245 // The condition can be loop invariant but still defined inside the 9246 // loop. This means that we can't just use the original 'cond' value. 9247 // We have to take the 'vectorized' value and pick the first lane. 9248 // Instcombine will make this a no-op. 9249 auto *InvarCond = 9250 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9251 9252 for (unsigned Part = 0; Part < State.UF; ++Part) { 9253 Value *Cond = InvarCond ? 
InvarCond : State.get(getOperand(0), Part); 9254 Value *Op0 = State.get(getOperand(1), Part); 9255 Value *Op1 = State.get(getOperand(2), Part); 9256 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9257 State.set(this, Sel, Part); 9258 State.ILV->addMetadata(Sel, &I); 9259 } 9260 } 9261 9262 void VPWidenRecipe::execute(VPTransformState &State) { 9263 auto &I = *cast<Instruction>(getUnderlyingValue()); 9264 auto &Builder = State.Builder; 9265 switch (I.getOpcode()) { 9266 case Instruction::Call: 9267 case Instruction::Br: 9268 case Instruction::PHI: 9269 case Instruction::GetElementPtr: 9270 case Instruction::Select: 9271 llvm_unreachable("This instruction is handled by a different recipe."); 9272 case Instruction::UDiv: 9273 case Instruction::SDiv: 9274 case Instruction::SRem: 9275 case Instruction::URem: 9276 case Instruction::Add: 9277 case Instruction::FAdd: 9278 case Instruction::Sub: 9279 case Instruction::FSub: 9280 case Instruction::FNeg: 9281 case Instruction::Mul: 9282 case Instruction::FMul: 9283 case Instruction::FDiv: 9284 case Instruction::FRem: 9285 case Instruction::Shl: 9286 case Instruction::LShr: 9287 case Instruction::AShr: 9288 case Instruction::And: 9289 case Instruction::Or: 9290 case Instruction::Xor: { 9291 // Just widen unops and binops. 9292 State.ILV->setDebugLocFromInst(&I); 9293 9294 for (unsigned Part = 0; Part < State.UF; ++Part) { 9295 SmallVector<Value *, 2> Ops; 9296 for (VPValue *VPOp : operands()) 9297 Ops.push_back(State.get(VPOp, Part)); 9298 9299 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9300 9301 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9302 VecOp->copyIRFlags(&I); 9303 9304 // If the instruction is vectorized and was in a basic block that needed 9305 // predication, we can't propagate poison-generating flags (nuw/nsw, 9306 // exact, etc.). The control flow has been linearized and the 9307 // instruction is no longer guarded by the predicate, which could make 9308 // the flag properties to no longer hold. 9309 if (State.MayGeneratePoisonRecipes.contains(this)) 9310 VecOp->dropPoisonGeneratingFlags(); 9311 } 9312 9313 // Use this vector value for all users of the original instruction. 9314 State.set(this, V, Part); 9315 State.ILV->addMetadata(V, &I); 9316 } 9317 9318 break; 9319 } 9320 case Instruction::ICmp: 9321 case Instruction::FCmp: { 9322 // Widen compares. Generate vector compares. 9323 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9324 auto *Cmp = cast<CmpInst>(&I); 9325 State.ILV->setDebugLocFromInst(Cmp); 9326 for (unsigned Part = 0; Part < State.UF; ++Part) { 9327 Value *A = State.get(getOperand(0), Part); 9328 Value *B = State.get(getOperand(1), Part); 9329 Value *C = nullptr; 9330 if (FCmp) { 9331 // Propagate fast math flags. 
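        // The RAII guard below restores the builder's previous flags on scope
        // exit, so the compare's fast-math flags apply only to this FCmp.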
9332 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9333 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9334 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9335 } else { 9336 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9337 } 9338 State.set(this, C, Part); 9339 State.ILV->addMetadata(C, &I); 9340 } 9341 9342 break; 9343 } 9344 9345 case Instruction::ZExt: 9346 case Instruction::SExt: 9347 case Instruction::FPToUI: 9348 case Instruction::FPToSI: 9349 case Instruction::FPExt: 9350 case Instruction::PtrToInt: 9351 case Instruction::IntToPtr: 9352 case Instruction::SIToFP: 9353 case Instruction::UIToFP: 9354 case Instruction::Trunc: 9355 case Instruction::FPTrunc: 9356 case Instruction::BitCast: { 9357 auto *CI = cast<CastInst>(&I); 9358 State.ILV->setDebugLocFromInst(CI); 9359 9360 /// Vectorize casts. 9361 Type *DestTy = (State.VF.isScalar()) 9362 ? CI->getType() 9363 : VectorType::get(CI->getType(), State.VF); 9364 9365 for (unsigned Part = 0; Part < State.UF; ++Part) { 9366 Value *A = State.get(getOperand(0), Part); 9367 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9368 State.set(this, Cast, Part); 9369 State.ILV->addMetadata(Cast, &I); 9370 } 9371 break; 9372 } 9373 default: 9374 // This instruction is not vectorized by simple widening. 9375 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9376 llvm_unreachable("Unhandled instruction!"); 9377 } // end of switch. 9378 } 9379 9380 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9381 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9382 // Construct a vector GEP by widening the operands of the scalar GEP as 9383 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9384 // results in a vector of pointers when at least one operand of the GEP 9385 // is vector-typed. Thus, to keep the representation compact, we only use 9386 // vector-typed operands for loop-varying values. 9387 9388 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9389 // If we are vectorizing, but the GEP has only loop-invariant operands, 9390 // the GEP we build (by only using vector-typed operands for 9391 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9392 // produce a vector of pointers, we need to either arbitrarily pick an 9393 // operand to broadcast, or broadcast a clone of the original GEP. 9394 // Here, we broadcast a clone of the original. 9395 // 9396 // TODO: If at some point we decide to scalarize instructions having 9397 // loop-invariant operands, this special case will no longer be 9398 // required. We would add the scalarization decision to 9399 // collectLoopScalars() and teach getVectorValue() to broadcast 9400 // the lane-zero scalar value. 9401 auto *Clone = State.Builder.Insert(GEP->clone()); 9402 for (unsigned Part = 0; Part < State.UF; ++Part) { 9403 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9404 State.set(this, EntryPart, Part); 9405 State.ILV->addMetadata(EntryPart, GEP); 9406 } 9407 } else { 9408 // If the GEP has at least one loop-varying operand, we are sure to 9409 // produce a vector of pointers. But if we are only unrolling, we want 9410 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9411 // produce with the code below will be scalar (if VF == 1) or vector 9412 // (otherwise). Note that for the unroll-only case, we still maintain 9413 // values in the vector mapping with initVector, as we do for other 9414 // instructions. 
9415 for (unsigned Part = 0; Part < State.UF; ++Part) { 9416 // The pointer operand of the new GEP. If it's loop-invariant, we 9417 // won't broadcast it. 9418 auto *Ptr = IsPtrLoopInvariant 9419 ? State.get(getOperand(0), VPIteration(0, 0)) 9420 : State.get(getOperand(0), Part); 9421 9422 // Collect all the indices for the new GEP. If any index is 9423 // loop-invariant, we won't broadcast it. 9424 SmallVector<Value *, 4> Indices; 9425 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9426 VPValue *Operand = getOperand(I); 9427 if (IsIndexLoopInvariant[I - 1]) 9428 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9429 else 9430 Indices.push_back(State.get(Operand, Part)); 9431 } 9432 9433 // If the GEP instruction is vectorized and was in a basic block that 9434 // needed predication, we can't propagate the poison-generating 'inbounds' 9435 // flag. The control flow has been linearized and the GEP is no longer 9436 // guarded by the predicate, which could make the 'inbounds' properties to 9437 // no longer hold. 9438 bool IsInBounds = 9439 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9440 9441 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9442 // but it should be a vector, otherwise. 9443 auto *NewGEP = IsInBounds 9444 ? State.Builder.CreateInBoundsGEP( 9445 GEP->getSourceElementType(), Ptr, Indices) 9446 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9447 Ptr, Indices); 9448 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9449 "NewGEP is not a pointer vector"); 9450 State.set(this, NewGEP, Part); 9451 State.ILV->addMetadata(NewGEP, GEP); 9452 } 9453 } 9454 } 9455 9456 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9457 assert(!State.Instance && "Int or FP induction being replicated."); 9458 9459 Value *Start = getStartValue()->getLiveInIRValue(); 9460 const InductionDescriptor &ID = getInductionDescriptor(); 9461 TruncInst *Trunc = getTruncInst(); 9462 IRBuilderBase &Builder = State.Builder; 9463 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9464 assert(State.VF.isVector() && "must have vector VF"); 9465 9466 // The value from the original loop to which we are mapping the new induction 9467 // variable. 9468 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9469 9470 auto &DL = EntryVal->getModule()->getDataLayout(); 9471 9472 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9473 // Generate code for the induction step. Note that induction steps are 9474 // required to be loop-invariant 9475 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 9476 if (SE.isSCEVable(IV->getType())) { 9477 SCEVExpander Exp(SE, DL, "induction"); 9478 return Exp.expandCodeFor(Step, Step->getType(), 9479 VectorPH->getTerminator()); 9480 } 9481 return cast<SCEVUnknown>(Step)->getValue(); 9482 }; 9483 9484 // Fast-math-flags propagate from the original induction instruction. 9485 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9486 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9487 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9488 9489 // Now do the actual transformations, and start with creating the step value. 
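  // The result is roughly, for an integer IV with step S and VF lanes per
  // part:
  //   vec.ind  = phi [ <start, start+S, ..., start+(VF-1)*S>, vector.ph ],
  //                  [ vec.ind.next, vector.body ]
  //   step.add = add vec.ind, splat(VF * S)   ; repeated once per unroll part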
9490 Value *Step = CreateStepValue(ID.getStep()); 9491 9492 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9493 "Expected either an induction phi-node or a truncate of it!"); 9494 9495 // Construct the initial value of the vector IV in the vector loop preheader 9496 auto CurrIP = Builder.saveIP(); 9497 Builder.SetInsertPoint(VectorPH->getTerminator()); 9498 if (isa<TruncInst>(EntryVal)) { 9499 assert(Start->getType()->isIntegerTy() && 9500 "Truncation requires an integer type"); 9501 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9502 Step = Builder.CreateTrunc(Step, TruncType); 9503 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9504 } 9505 9506 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9507 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9508 Value *SteppedStart = getStepVector( 9509 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9510 9511 // We create vector phi nodes for both integer and floating-point induction 9512 // variables. Here, we determine the kind of arithmetic we will perform. 9513 Instruction::BinaryOps AddOp; 9514 Instruction::BinaryOps MulOp; 9515 if (Step->getType()->isIntegerTy()) { 9516 AddOp = Instruction::Add; 9517 MulOp = Instruction::Mul; 9518 } else { 9519 AddOp = ID.getInductionOpcode(); 9520 MulOp = Instruction::FMul; 9521 } 9522 9523 // Multiply the vectorization factor by the step using integer or 9524 // floating-point arithmetic as appropriate. 9525 Type *StepType = Step->getType(); 9526 Value *RuntimeVF; 9527 if (Step->getType()->isFloatingPointTy()) 9528 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9529 else 9530 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9531 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9532 9533 // Create a vector splat to use in the induction update. 9534 // 9535 // FIXME: If the step is non-constant, we create the vector splat with 9536 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9537 // handle a constant vector splat. 9538 Value *SplatVF = isa<Constant>(Mul) 9539 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9540 : Builder.CreateVectorSplat(State.VF, Mul); 9541 Builder.restoreIP(CurrIP); 9542 9543 // We may need to add the step a number of times, depending on the unroll 9544 // factor. The last of those goes into the PHI. 9545 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9546 &*State.CFG.PrevBB->getFirstInsertionPt()); 9547 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9548 Instruction *LastInduction = VecInd; 9549 for (unsigned Part = 0; Part < State.UF; ++Part) { 9550 State.set(this, LastInduction, Part); 9551 9552 if (isa<TruncInst>(EntryVal)) 9553 State.ILV->addMetadata(LastInduction, EntryVal); 9554 9555 LastInduction = cast<Instruction>( 9556 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9557 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9558 } 9559 9560 LastInduction->setName("vec.ind.next"); 9561 VecInd->addIncoming(SteppedStart, VectorPH); 9562 // Add induction update using an incorrect block temporarily. The phi node 9563 // will be fixed after VPlan execution. Note that at this point the latch 9564 // block cannot be used, as it does not exist yet. 9565 // TODO: Model increment value in VPlan, by turning the recipe into a 9566 // multi-def and a subclass of VPHeaderPHIRecipe. 
9567 VecInd->addIncoming(LastInduction, VectorPH); 9568 } 9569 9570 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9571 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9572 "Not a pointer induction according to InductionDescriptor!"); 9573 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9574 "Unexpected type."); 9575 9576 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9577 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9578 9579 if (all_of(users(), [this](const VPUser *U) { 9580 return cast<VPRecipeBase>(U)->usesScalars(this); 9581 })) { 9582 // This is the normalized GEP that starts counting at zero. 9583 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9584 CanonicalIV, IndDesc.getStep()->getType()); 9585 // Determine the number of scalars we need to generate for each unroll 9586 // iteration. If the instruction is uniform, we only need to generate the 9587 // first lane. Otherwise, we generate all VF values. 9588 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9589 assert((IsUniform || !State.VF.isScalable()) && 9590 "Cannot scalarize a scalable VF"); 9591 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9592 9593 for (unsigned Part = 0; Part < State.UF; ++Part) { 9594 Value *PartStart = 9595 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9596 9597 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9598 Value *Idx = State.Builder.CreateAdd( 9599 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9600 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9601 9602 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9603 State.CFG.PrevBB->getTerminator()); 9604 Value *SclrGep = emitTransformedIndex( 9605 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9606 SclrGep->setName("next.gep"); 9607 State.set(this, SclrGep, VPIteration(Part, Lane)); 9608 } 9609 } 9610 return; 9611 } 9612 9613 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9614 "Induction step not a SCEV constant!"); 9615 Type *PhiType = IndDesc.getStep()->getType(); 9616 9617 // Build a pointer phi 9618 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9619 Type *ScStValueType = ScalarStartValue->getType(); 9620 PHINode *NewPointerPhi = 9621 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9622 9623 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9624 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH); 9625 9626 // A pointer induction, performed by using a gep 9627 const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout(); 9628 Instruction *InductionLoc = &*State.Builder.GetInsertPoint(); 9629 9630 const SCEV *ScalarStep = IndDesc.getStep(); 9631 SCEVExpander Exp(SE, DL, "induction"); 9632 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9633 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9634 Value *NumUnrolledElems = 9635 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9636 Value *InductionGEP = GetElementPtrInst::Create( 9637 IndDesc.getElementType(), NewPointerPhi, 9638 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9639 InductionLoc); 9640 // Add induction update using an incorrect block temporarily. The phi node 9641 // will be fixed after VPlan execution. Note that at this point the latch 9642 // block cannot be used, as it does not exist yet. 
9643 // TODO: Model increment value in VPlan, by turning the recipe into a 9644 // multi-def and a subclass of VPHeaderPHIRecipe. 9645 NewPointerPhi->addIncoming(InductionGEP, VectorPH); 9646 9647 // Create UF many actual address geps that use the pointer 9648 // phi as base and a vectorized version of the step value 9649 // (<step*0, ..., step*N>) as offset. 9650 for (unsigned Part = 0; Part < State.UF; ++Part) { 9651 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9652 Value *StartOffsetScalar = 9653 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9654 Value *StartOffset = 9655 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9656 // Create a vector of consecutive numbers from zero to VF. 9657 StartOffset = State.Builder.CreateAdd( 9658 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9659 9660 Value *GEP = State.Builder.CreateGEP( 9661 IndDesc.getElementType(), NewPointerPhi, 9662 State.Builder.CreateMul( 9663 StartOffset, 9664 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9665 "vector.gep")); 9666 State.set(this, GEP, Part); 9667 } 9668 } 9669 9670 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9671 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9672 9673 // Fast-math-flags propagate from the original induction instruction. 9674 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9675 if (IndDesc.getInductionBinOp() && 9676 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9677 State.Builder.setFastMathFlags( 9678 IndDesc.getInductionBinOp()->getFastMathFlags()); 9679 9680 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9681 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9682 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9683 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9684 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9685 ScalarIV = 9686 Ty->isIntegerTy() 9687 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9688 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9689 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9690 getStartValue()->getLiveInIRValue(), Step, 9691 IndDesc); 9692 ScalarIV->setName("offset.idx"); 9693 } 9694 if (TruncToTy) { 9695 assert(Step->getType()->isIntegerTy() && 9696 "Truncation requires an integer step"); 9697 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9698 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9699 } 9700 return ScalarIV; 9701 }; 9702 9703 Value *ScalarIV = CreateScalarIV(Step); 9704 if (State.VF.isVector()) { 9705 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9706 return; 9707 } 9708 9709 for (unsigned Part = 0; Part < State.UF; ++Part) { 9710 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9711 Value *EntryPart; 9712 if (Step->getType()->isFloatingPointTy()) { 9713 Value *StartIdx = 9714 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9715 // Floating-point operations inherit FMF via the builder's flags. 
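    // For unroll part P this computes ScalarIV op (VF * P) * Step, where op is
    // the induction's floating-point opcode; e.g. for an fadd induction with
    // VF = 4 the per-part values start at ScalarIV, ScalarIV + 4 * Step,
    // ScalarIV + 8 * Step, and so on.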
9716 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9717 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9718 ScalarIV, MulOp); 9719 } else { 9720 Value *StartIdx = 9721 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9722 EntryPart = State.Builder.CreateAdd( 9723 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9724 } 9725 State.set(this, EntryPart, Part); 9726 } 9727 } 9728 9729 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9730 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9731 State); 9732 } 9733 9734 void VPBlendRecipe::execute(VPTransformState &State) { 9735 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9736 // We know that all PHIs in non-header blocks are converted into 9737 // selects, so we don't have to worry about the insertion order and we 9738 // can just use the builder. 9739 // At this point we generate the predication tree. There may be 9740 // duplications since this is a simple recursive scan, but future 9741 // optimizations will clean it up. 9742 9743 unsigned NumIncoming = getNumIncomingValues(); 9744 9745 // Generate a sequence of selects of the form: 9746 // SELECT(Mask3, In3, 9747 // SELECT(Mask2, In2, 9748 // SELECT(Mask1, In1, 9749 // In0))) 9750 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9751 // are essentially undef are taken from In0. 9752 InnerLoopVectorizer::VectorParts Entry(State.UF); 9753 for (unsigned In = 0; In < NumIncoming; ++In) { 9754 for (unsigned Part = 0; Part < State.UF; ++Part) { 9755 // We might have single edge PHIs (blocks) - use an identity 9756 // 'select' for the first PHI operand. 9757 Value *In0 = State.get(getIncomingValue(In), Part); 9758 if (In == 0) 9759 Entry[Part] = In0; // Initialize with the first incoming value. 9760 else { 9761 // Select between the current value and the previous incoming edge 9762 // based on the incoming mask. 9763 Value *Cond = State.get(getMask(In), Part); 9764 Entry[Part] = 9765 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9766 } 9767 } 9768 } 9769 for (unsigned Part = 0; Part < State.UF; ++Part) 9770 State.set(this, Entry[Part], Part); 9771 } 9772 9773 void VPInterleaveRecipe::execute(VPTransformState &State) { 9774 assert(!State.Instance && "Interleave group being replicated."); 9775 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9776 getStoredValues(), getMask()); 9777 } 9778 9779 void VPReductionRecipe::execute(VPTransformState &State) { 9780 assert(!State.Instance && "Reduction being replicated."); 9781 Value *PrevInChain = State.get(getChainOp(), 0); 9782 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9783 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9784 // Propagate the fast-math flags carried by the underlying instruction. 
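  // The guard restores the builder's previous fast-math flags when it goes out
  // of scope, so only the selects, reductions and chain updates emitted below
  // pick up the recurrence's FMF.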
9785 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9786 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9787 for (unsigned Part = 0; Part < State.UF; ++Part) { 9788 Value *NewVecOp = State.get(getVecOp(), Part); 9789 if (VPValue *Cond = getCondOp()) { 9790 Value *NewCond = State.get(Cond, Part); 9791 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9792 Value *Iden = RdxDesc->getRecurrenceIdentity( 9793 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9794 Value *IdenVec = 9795 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9796 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9797 NewVecOp = Select; 9798 } 9799 Value *NewRed; 9800 Value *NextInChain; 9801 if (IsOrdered) { 9802 if (State.VF.isVector()) 9803 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9804 PrevInChain); 9805 else 9806 NewRed = State.Builder.CreateBinOp( 9807 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9808 NewVecOp); 9809 PrevInChain = NewRed; 9810 } else { 9811 PrevInChain = State.get(getChainOp(), Part); 9812 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9813 } 9814 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9815 NextInChain = 9816 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9817 NewRed, PrevInChain); 9818 } else if (IsOrdered) 9819 NextInChain = NewRed; 9820 else 9821 NextInChain = State.Builder.CreateBinOp( 9822 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9823 PrevInChain); 9824 State.set(this, NextInChain, Part); 9825 } 9826 } 9827 9828 void VPReplicateRecipe::execute(VPTransformState &State) { 9829 if (State.Instance) { // Generate a single instance. 9830 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9831 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9832 IsPredicated, State); 9833 // Insert scalar instance packing it into a vector. 9834 if (AlsoPack && State.VF.isVector()) { 9835 // If we're constructing lane 0, initialize to start from poison. 9836 if (State.Instance->Lane.isFirstLane()) { 9837 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 9838 Value *Poison = PoisonValue::get( 9839 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9840 State.set(this, Poison, State.Instance->Part); 9841 } 9842 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9843 } 9844 return; 9845 } 9846 9847 // Generate scalar instances for all VF lanes of all UF parts, unless the 9848 // instruction is uniform in which case generate only the first lane for each 9849 // of the UF parts. 9850 unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 9851 assert((!State.VF.isScalable() || IsUniform) && 9852 "Can't scalarize a scalable vector"); 9853 for (unsigned Part = 0; Part < State.UF; ++Part) 9854 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9855 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9856 VPIteration(Part, Lane), IsPredicated, 9857 State); 9858 } 9859 9860 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9861 assert(State.Instance && "Branch on Mask works only on single instance."); 9862 9863 unsigned Part = State.Instance->Part; 9864 unsigned Lane = State.Instance->Lane.getKnownLane(); 9865 9866 Value *ConditionBit = nullptr; 9867 VPValue *BlockInMask = getMask(); 9868 if (BlockInMask) { 9869 ConditionBit = State.get(BlockInMask, Part); 9870 if (ConditionBit->getType()->isVectorTy()) 9871 ConditionBit = State.Builder.CreateExtractElement( 9872 ConditionBit, State.Builder.getInt32(Lane)); 9873 } else // Block in mask is all-one. 9874 ConditionBit = State.Builder.getTrue(); 9875 9876 // Replace the temporary unreachable terminator with a new conditional branch, 9877 // whose two destinations will be set later when they are created. 9878 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9879 assert(isa<UnreachableInst>(CurrentTerminator) && 9880 "Expected to replace unreachable terminator with conditional branch."); 9881 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9882 CondBr->setSuccessor(0, nullptr); 9883 ReplaceInstWithInst(CurrentTerminator, CondBr); 9884 } 9885 9886 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9887 assert(State.Instance && "Predicated instruction PHI works per instance."); 9888 Instruction *ScalarPredInst = 9889 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9890 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9891 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9892 assert(PredicatingBB && "Predicated block has no single predecessor."); 9893 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9894 "operand must be VPReplicateRecipe"); 9895 9896 // By current pack/unpack logic we need to generate only a single phi node: if 9897 // a vector value for the predicated instruction exists at this point it means 9898 // the instruction has vector users only, and a phi for the vector value is 9899 // needed. In this case the recipe of the predicated instruction is marked to 9900 // also do that packing, thereby "hoisting" the insert-element sequence. 9901 // Otherwise, a phi node for the scalar value is needed. 9902 unsigned Part = State.Instance->Part; 9903 if (State.hasVectorValue(getOperand(0), Part)) { 9904 Value *VectorValue = State.get(getOperand(0), Part); 9905 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9906 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9907 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9908 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9909 if (State.hasVectorValue(this, Part)) 9910 State.reset(this, VPhi, Part); 9911 else 9912 State.set(this, VPhi, Part); 9913 // NOTE: Currently we need to update the value of the operand, so the next 9914 // predicated iteration inserts its generated value in the correct vector. 
9915 State.reset(getOperand(0), VPhi, Part); 9916 } else { 9917 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9918 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9919 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9920 PredicatingBB); 9921 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9922 if (State.hasScalarValue(this, *State.Instance)) 9923 State.reset(this, Phi, *State.Instance); 9924 else 9925 State.set(this, Phi, *State.Instance); 9926 // NOTE: Currently we need to update the value of the operand, so the next 9927 // predicated iteration inserts its generated value in the correct vector. 9928 State.reset(getOperand(0), Phi, *State.Instance); 9929 } 9930 } 9931 9932 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9933 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9934 9935 // Attempt to issue a wide load. 9936 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9937 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9938 9939 assert((LI || SI) && "Invalid Load/Store instruction"); 9940 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9941 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9942 9943 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9944 9945 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9946 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9947 bool CreateGatherScatter = !Consecutive; 9948 9949 auto &Builder = State.Builder; 9950 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9951 bool isMaskRequired = getMask(); 9952 if (isMaskRequired) 9953 for (unsigned Part = 0; Part < State.UF; ++Part) 9954 BlockInMaskParts[Part] = State.get(getMask(), Part); 9955 9956 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9957 // Calculate the pointer for the specific unroll-part. 9958 GetElementPtrInst *PartPtr = nullptr; 9959 9960 bool InBounds = false; 9961 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9962 InBounds = gep->isInBounds(); 9963 if (Reverse) { 9964 // If the address is consecutive but reversed, then the 9965 // wide store needs to start at the last vector element. 9966 // RunTimeVF = VScale * VF.getKnownMinValue() 9967 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9968 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9969 // NumElt = -Part * RunTimeVF 9970 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9971 // LastLane = 1 - RunTimeVF 9972 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9973 PartPtr = 9974 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9975 PartPtr->setIsInBounds(InBounds); 9976 PartPtr = cast<GetElementPtrInst>( 9977 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9978 PartPtr->setIsInBounds(InBounds); 9979 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
9980 BlockInMaskParts[Part] = 9981 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9982 } else { 9983 Value *Increment = 9984 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9985 PartPtr = cast<GetElementPtrInst>( 9986 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9987 PartPtr->setIsInBounds(InBounds); 9988 } 9989 9990 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9991 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9992 }; 9993 9994 // Handle Stores: 9995 if (SI) { 9996 State.ILV->setDebugLocFromInst(SI); 9997 9998 for (unsigned Part = 0; Part < State.UF; ++Part) { 9999 Instruction *NewSI = nullptr; 10000 Value *StoredVal = State.get(StoredValue, Part); 10001 if (CreateGatherScatter) { 10002 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10003 Value *VectorGep = State.get(getAddr(), Part); 10004 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 10005 MaskPart); 10006 } else { 10007 if (Reverse) { 10008 // If we store to reverse consecutive memory locations, then we need 10009 // to reverse the order of elements in the stored value. 10010 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 10011 // We don't want to update the value in the map as it might be used in 10012 // another expression. So don't call resetVectorValue(StoredVal). 10013 } 10014 auto *VecPtr = 10015 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10016 if (isMaskRequired) 10017 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10018 BlockInMaskParts[Part]); 10019 else 10020 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10021 } 10022 State.ILV->addMetadata(NewSI, SI); 10023 } 10024 return; 10025 } 10026 10027 // Handle loads. 10028 assert(LI && "Must have a load instruction"); 10029 State.ILV->setDebugLocFromInst(LI); 10030 for (unsigned Part = 0; Part < State.UF; ++Part) { 10031 Value *NewLI; 10032 if (CreateGatherScatter) { 10033 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10034 Value *VectorGep = State.get(getAddr(), Part); 10035 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10036 nullptr, "wide.masked.gather"); 10037 State.ILV->addMetadata(NewLI, LI); 10038 } else { 10039 auto *VecPtr = 10040 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10041 if (isMaskRequired) 10042 NewLI = Builder.CreateMaskedLoad( 10043 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10044 PoisonValue::get(DataTy), "wide.masked.load"); 10045 else 10046 NewLI = 10047 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10048 10049 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10050 State.ILV->addMetadata(NewLI, LI); 10051 if (Reverse) 10052 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10053 } 10054 10055 State.set(this, NewLI, Part); 10056 } 10057 } 10058 10059 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10060 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10061 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10062 // for predication. 
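// The checks below are applied in that priority order: optimizing for size
// wins over everything else, then an explicit PreferPredicateOverEpilogue
// option, then the loop's predicate hint, and finally the TTI preference.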
10063 static ScalarEpilogueLowering getScalarEpilogueLowering( 10064 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10065 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10066 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10067 LoopVectorizationLegality &LVL) { 10068 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10069 // don't look at hints or options, and don't request a scalar epilogue. 10070 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10071 // LoopAccessInfo (due to code dependency and not being able to reliably get 10072 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10073 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10074 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10075 // back to the old way and vectorize with versioning when forced. See D81345.) 10076 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10077 PGSOQueryType::IRPass) && 10078 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10079 return CM_ScalarEpilogueNotAllowedOptSize; 10080 10081 // 2) If set, obey the directives 10082 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10083 switch (PreferPredicateOverEpilogue) { 10084 case PreferPredicateTy::ScalarEpilogue: 10085 return CM_ScalarEpilogueAllowed; 10086 case PreferPredicateTy::PredicateElseScalarEpilogue: 10087 return CM_ScalarEpilogueNotNeededUsePredicate; 10088 case PreferPredicateTy::PredicateOrDontVectorize: 10089 return CM_ScalarEpilogueNotAllowedUsePredicate; 10090 }; 10091 } 10092 10093 // 3) If set, obey the hints 10094 switch (Hints.getPredicate()) { 10095 case LoopVectorizeHints::FK_Enabled: 10096 return CM_ScalarEpilogueNotNeededUsePredicate; 10097 case LoopVectorizeHints::FK_Disabled: 10098 return CM_ScalarEpilogueAllowed; 10099 }; 10100 10101 // 4) if the TTI hook indicates this is profitable, request predication. 10102 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10103 LVL.getLAI())) 10104 return CM_ScalarEpilogueNotNeededUsePredicate; 10105 10106 return CM_ScalarEpilogueAllowed; 10107 } 10108 10109 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10110 // If Values have been set for this Def return the one relevant for \p Part. 10111 if (hasVectorValue(Def, Part)) 10112 return Data.PerPartOutput[Def][Part]; 10113 10114 if (!hasScalarValue(Def, {Part, 0})) { 10115 Value *IRV = Def->getLiveInIRValue(); 10116 Value *B = ILV->getBroadcastInstrs(IRV); 10117 set(Def, B, Part); 10118 return B; 10119 } 10120 10121 Value *ScalarValue = get(Def, {Part, 0}); 10122 // If we aren't vectorizing, we can just copy the scalar map values over 10123 // to the vector map. 10124 if (VF.isScalar()) { 10125 set(Def, ScalarValue, Part); 10126 return ScalarValue; 10127 } 10128 10129 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10130 bool IsUniform = RepR && RepR->isUniform(); 10131 10132 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10133 // Check if there is a scalar value for the selected lane. 10134 if (!hasScalarValue(Def, {Part, LastLane})) { 10135 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
10136 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) || 10137 isa<VPScalarIVStepsRecipe>(Def->getDef())) && 10138 "unexpected recipe found to be invariant"); 10139 IsUniform = true; 10140 LastLane = 0; 10141 } 10142 10143 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10144 // Set the insert point after the last scalarized instruction or after the 10145 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10146 // will directly follow the scalar definitions. 10147 auto OldIP = Builder.saveIP(); 10148 auto NewIP = 10149 isa<PHINode>(LastInst) 10150 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10151 : std::next(BasicBlock::iterator(LastInst)); 10152 Builder.SetInsertPoint(&*NewIP); 10153 10154 // However, if we are vectorizing, we need to construct the vector values. 10155 // If the value is known to be uniform after vectorization, we can just 10156 // broadcast the scalar value corresponding to lane zero for each unroll 10157 // iteration. Otherwise, we construct the vector values using 10158 // insertelement instructions. Since the resulting vectors are stored in 10159 // State, we will only generate the insertelements once. 10160 Value *VectorValue = nullptr; 10161 if (IsUniform) { 10162 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10163 set(Def, VectorValue, Part); 10164 } else { 10165 // Initialize packing with insertelements to start from undef. 10166 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10167 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10168 set(Def, Undef, Part); 10169 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10170 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10171 VectorValue = get(Def, Part); 10172 } 10173 Builder.restoreIP(OldIP); 10174 return VectorValue; 10175 } 10176 10177 // Process the loop in the VPlan-native vectorization path. This path builds 10178 // VPlan upfront in the vectorization pipeline, which allows to apply 10179 // VPlan-to-VPlan transformations from the very beginning without modifying the 10180 // input LLVM IR. 10181 static bool processLoopInVPlanNativePath( 10182 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10183 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10184 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10185 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10186 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10187 LoopVectorizationRequirements &Requirements) { 10188 10189 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10190 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10191 return false; 10192 } 10193 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10194 Function *F = L->getHeader()->getParent(); 10195 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10196 10197 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10198 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10199 10200 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10201 &Hints, IAI); 10202 // Use the planner for outer loop vectorization. 10203 // TODO: CM is not used at this point inside the planner. Turn CM into an 10204 // optional argument if we don't need it in the future. 10205 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10206 Requirements, ORE); 10207 10208 // Get user vectorization factor. 
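  // Note that interleaving is not applied on this path; the plan below is
  // executed with an unroll factor of 1.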
10209 ElementCount UserVF = Hints.getWidth(); 10210 10211 CM.collectElementTypesForWidening(); 10212 10213 // Plan how to best vectorize, return the best VF and its cost. 10214 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10215 10216 // If we are stress testing VPlan builds, do not attempt to generate vector 10217 // code. Masked vector code generation support will follow soon. 10218 // Also, do not attempt to vectorize if no vector code will be produced. 10219 if (VPlanBuildStressTest || EnableVPlanPredication || 10220 VectorizationFactor::Disabled() == VF) 10221 return false; 10222 10223 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10224 10225 { 10226 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10227 F->getParent()->getDataLayout()); 10228 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10229 &CM, BFI, PSI, Checks); 10230 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10231 << L->getHeader()->getParent()->getName() << "\"\n"); 10232 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10233 } 10234 10235 // Mark the loop as already vectorized to avoid vectorizing again. 10236 Hints.setAlreadyVectorized(); 10237 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10238 return true; 10239 } 10240 10241 // Emit a remark if there are stores to floats that required a floating point 10242 // extension. If the vectorized loop was generated with floating point there 10243 // will be a performance penalty from the conversion overhead and the change in 10244 // the vector width. 10245 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10246 SmallVector<Instruction *, 4> Worklist; 10247 for (BasicBlock *BB : L->getBlocks()) { 10248 for (Instruction &Inst : *BB) { 10249 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10250 if (S->getValueOperand()->getType()->isFloatTy()) 10251 Worklist.push_back(S); 10252 } 10253 } 10254 } 10255 10256 // Traverse the floating point stores upwards searching, for floating point 10257 // conversions. 10258 SmallPtrSet<const Instruction *, 4> Visited; 10259 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10260 while (!Worklist.empty()) { 10261 auto *I = Worklist.pop_back_val(); 10262 if (!L->contains(I)) 10263 continue; 10264 if (!Visited.insert(I).second) 10265 continue; 10266 10267 // Emit a remark if the floating point store required a floating 10268 // point conversion. 10269 // TODO: More work could be done to identify the root cause such as a 10270 // constant or a function return type and point the user to it. 10271 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10272 ORE->emit([&]() { 10273 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10274 I->getDebugLoc(), L->getHeader()) 10275 << "floating point conversion changes vector width. " 10276 << "Mixed floating point precision requires an up/down " 10277 << "cast that will negatively impact performance."; 10278 }); 10279 10280 for (Use &Op : I->operands()) 10281 if (auto *OpI = dyn_cast<Instruction>(Op)) 10282 Worklist.push_back(OpI); 10283 } 10284 } 10285 10286 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10287 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10288 !EnableLoopInterleaving), 10289 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10290 !EnableLoopVectorization) {} 10291 10292 bool LoopVectorizePass::processLoop(Loop *L) { 10293 assert((EnableVPlanNativePath || L->isInnermost()) && 10294 "VPlan-native path is not enabled. 
Only process inner loops."); 10295 10296 #ifndef NDEBUG 10297 const std::string DebugLocStr = getDebugLocString(L); 10298 #endif /* NDEBUG */ 10299 10300 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10301 << L->getHeader()->getParent()->getName() << "' from " 10302 << DebugLocStr << "\n"); 10303 10304 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10305 10306 LLVM_DEBUG( 10307 dbgs() << "LV: Loop hints:" 10308 << " force=" 10309 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10310 ? "disabled" 10311 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10312 ? "enabled" 10313 : "?")) 10314 << " width=" << Hints.getWidth() 10315 << " interleave=" << Hints.getInterleave() << "\n"); 10316 10317 // Function containing loop 10318 Function *F = L->getHeader()->getParent(); 10319 10320 // Looking at the diagnostic output is the only way to determine if a loop 10321 // was vectorized (other than looking at the IR or machine code), so it 10322 // is important to generate an optimization remark for each loop. Most of 10323 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10324 // generated as OptimizationRemark and OptimizationRemarkMissed are 10325 // less verbose reporting vectorized loops and unvectorized loops that may 10326 // benefit from vectorization, respectively. 10327 10328 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10329 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10330 return false; 10331 } 10332 10333 PredicatedScalarEvolution PSE(*SE, *L); 10334 10335 // Check if it is legal to vectorize the loop. 10336 LoopVectorizationRequirements Requirements; 10337 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10338 &Requirements, &Hints, DB, AC, BFI, PSI); 10339 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10340 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10341 Hints.emitRemarkWithHints(); 10342 return false; 10343 } 10344 10345 // Check the function attributes and profiles to find out if this function 10346 // should be optimized for size. 10347 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10348 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10349 10350 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10351 // here. They may require CFG and instruction level transformations before 10352 // even evaluating whether vectorization is profitable. Since we cannot modify 10353 // the incoming IR, we need to build VPlan upfront in the vectorization 10354 // pipeline. 10355 if (!L->isInnermost()) 10356 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10357 ORE, BFI, PSI, Hints, Requirements); 10358 10359 assert(L->isInnermost() && "Inner loop expected."); 10360 10361 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10362 // count by optimizing for size, to minimize overheads. 10363 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10364 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10365 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10366 << "This loop is worth vectorizing only if no scalar " 10367 << "iteration overheads are incurred."); 10368 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10369 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10370 else { 10371 LLVM_DEBUG(dbgs() << "\n"); 10372 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10373 } 10374 } 10375 10376 // Check the function attributes to see if implicit floats are allowed. 10377 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10378 // an integer loop and the vector instructions selected are purely integer 10379 // vector instructions? 10380 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10381 reportVectorizationFailure( 10382 "Can't vectorize when the NoImplicitFloat attribute is used", 10383 "loop not vectorized due to NoImplicitFloat attribute", 10384 "NoImplicitFloat", ORE, L); 10385 Hints.emitRemarkWithHints(); 10386 return false; 10387 } 10388 10389 // Check if the target supports potentially unsafe FP vectorization. 10390 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10391 // for the target we're vectorizing for, to make sure none of the 10392 // additional fp-math flags can help. 10393 if (Hints.isPotentiallyUnsafe() && 10394 TTI->isFPVectorizationPotentiallyUnsafe()) { 10395 reportVectorizationFailure( 10396 "Potentially unsafe FP op prevents vectorization", 10397 "loop not vectorized due to unsafe FP support.", 10398 "UnsafeFP", ORE, L); 10399 Hints.emitRemarkWithHints(); 10400 return false; 10401 } 10402 10403 bool AllowOrderedReductions; 10404 // If the flag is set, use that instead and override the TTI behaviour. 10405 if (ForceOrderedReductions.getNumOccurrences() > 0) 10406 AllowOrderedReductions = ForceOrderedReductions; 10407 else 10408 AllowOrderedReductions = TTI->enableOrderedReductions(); 10409 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10410 ORE->emit([&]() { 10411 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10412 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10413 ExactFPMathInst->getDebugLoc(), 10414 ExactFPMathInst->getParent()) 10415 << "loop not vectorized: cannot prove it is safe to reorder " 10416 "floating-point operations"; 10417 }); 10418 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10419 "reorder floating-point operations\n"); 10420 Hints.emitRemarkWithHints(); 10421 return false; 10422 } 10423 10424 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10425 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10426 10427 // If an override option has been passed in for interleaved accesses, use it. 10428 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10429 UseInterleaved = EnableInterleavedMemAccesses; 10430 10431 // Analyze interleaved memory accesses. 10432 if (UseInterleaved) { 10433 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10434 } 10435 10436 // Use the cost model. 10437 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10438 F, &Hints, IAI); 10439 CM.collectValuesToIgnore(); 10440 CM.collectElementTypesForWidening(); 10441 10442 // Use the planner for vectorization. 10443 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10444 Requirements, ORE); 10445 10446 // Get user vectorization factor and interleave count. 
10447 ElementCount UserVF = Hints.getWidth(); 10448 unsigned UserIC = Hints.getInterleave(); 10449 10450 // Plan how to best vectorize, return the best VF and its cost. 10451 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10452 10453 VectorizationFactor VF = VectorizationFactor::Disabled(); 10454 unsigned IC = 1; 10455 10456 if (MaybeVF) { 10457 VF = *MaybeVF; 10458 // Select the interleave count. 10459 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10460 } 10461 10462 // Identify the diagnostic messages that should be produced. 10463 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10464 bool VectorizeLoop = true, InterleaveLoop = true; 10465 if (VF.Width.isScalar()) { 10466 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10467 VecDiagMsg = std::make_pair( 10468 "VectorizationNotBeneficial", 10469 "the cost-model indicates that vectorization is not beneficial"); 10470 VectorizeLoop = false; 10471 } 10472 10473 if (!MaybeVF && UserIC > 1) { 10474 // Tell the user interleaving was avoided up-front, despite being explicitly 10475 // requested. 10476 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10477 "interleaving should be avoided up front\n"); 10478 IntDiagMsg = std::make_pair( 10479 "InterleavingAvoided", 10480 "Ignoring UserIC, because interleaving was avoided up front"); 10481 InterleaveLoop = false; 10482 } else if (IC == 1 && UserIC <= 1) { 10483 // Tell the user interleaving is not beneficial. 10484 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 10485 IntDiagMsg = std::make_pair( 10486 "InterleavingNotBeneficial", 10487 "the cost-model indicates that interleaving is not beneficial"); 10488 InterleaveLoop = false; 10489 if (UserIC == 1) { 10490 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 10491 IntDiagMsg.second += 10492 " and is explicitly disabled or interleave count is set to 1"; 10493 } 10494 } else if (IC > 1 && UserIC == 1) { 10495 // Tell the user interleaving is beneficial, but it explicitly disabled. 10496 LLVM_DEBUG( 10497 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 10498 IntDiagMsg = std::make_pair( 10499 "InterleavingBeneficialButDisabled", 10500 "the cost-model indicates that interleaving is beneficial " 10501 "but is explicitly disabled or interleave count is set to 1"); 10502 InterleaveLoop = false; 10503 } 10504 10505 // Override IC if user provided an interleave count. 10506 IC = UserIC > 0 ? UserIC : IC; 10507 10508 // Emit diagnostic messages, if any. 10509 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 10510 if (!VectorizeLoop && !InterleaveLoop) { 10511 // Do not vectorize or interleaving the loop. 
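    // Emit both missed-optimization remarks (e.g. for
    // -Rpass-missed=loop-vectorize) explaining why neither transformation was
    // applied, then bail out.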
10512 ORE->emit([&]() { 10513 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10514 L->getStartLoc(), L->getHeader()) 10515 << VecDiagMsg.second; 10516 }); 10517 ORE->emit([&]() { 10518 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10519 L->getStartLoc(), L->getHeader()) 10520 << IntDiagMsg.second; 10521 }); 10522 return false; 10523 } else if (!VectorizeLoop && InterleaveLoop) { 10524 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10525 ORE->emit([&]() { 10526 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10527 L->getStartLoc(), L->getHeader()) 10528 << VecDiagMsg.second; 10529 }); 10530 } else if (VectorizeLoop && !InterleaveLoop) { 10531 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10532 << ") in " << DebugLocStr << '\n'); 10533 ORE->emit([&]() { 10534 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10535 L->getStartLoc(), L->getHeader()) 10536 << IntDiagMsg.second; 10537 }); 10538 } else if (VectorizeLoop && InterleaveLoop) { 10539 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10540 << ") in " << DebugLocStr << '\n'); 10541 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10542 } 10543 10544 bool DisableRuntimeUnroll = false; 10545 MDNode *OrigLoopID = L->getLoopID(); 10546 { 10547 // Optimistically generate runtime checks. Drop them if they turn out to not 10548 // be profitable. Limit the scope of Checks, so the cleanup happens 10549 // immediately after vector codegeneration is done. 10550 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10551 F->getParent()->getDataLayout()); 10552 if (!VF.Width.isScalar() || IC > 1) 10553 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate()); 10554 10555 using namespace ore; 10556 if (!VectorizeLoop) { 10557 assert(IC > 1 && "interleave count should not be 1 or 0"); 10558 // If we decided that it is not legal to vectorize the loop, then 10559 // interleave it. 10560 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10561 &CM, BFI, PSI, Checks); 10562 10563 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10564 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10565 10566 ORE->emit([&]() { 10567 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10568 L->getHeader()) 10569 << "interleaved loop (interleaved count: " 10570 << NV("InterleaveCount", IC) << ")"; 10571 }); 10572 } else { 10573 // If we decided that it is *legal* to vectorize the loop, then do it. 10574 10575 // Consider vectorizing the epilogue too if it's profitable. 10576 VectorizationFactor EpilogueVF = 10577 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10578 if (EpilogueVF.Width.isVector()) { 10579 10580 // The first pass vectorizes the main loop and creates a scalar epilogue 10581 // to be vectorized by executing the plan (potentially with a different 10582 // factor) again shortly afterwards. 10583 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10584 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10585 EPI, &LVL, &CM, BFI, PSI, Checks); 10586 10587 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10588 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10589 DT); 10590 ++LoopsVectorized; 10591 10592 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10593 formLCSSARecursively(*L, *DT, LI, SE); 10594 10595 // Second pass vectorizes the epilogue and adjusts the control flow 10596 // edges from the first pass. 
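      // Reuse the same EPI bookkeeping for the second pass by promoting the
      // epilogue VF/UF to be the "main" factors the epilogue vectorizer uses.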
10597 EPI.MainLoopVF = EPI.EpilogueVF; 10598 EPI.MainLoopUF = EPI.EpilogueUF; 10599 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10600 ORE, EPI, &LVL, &CM, BFI, PSI, 10601 Checks); 10602 10603 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10604 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->setName( 10605 "vec.epilog.vector.body"); 10606 10607 // Ensure that the start values for any VPReductionPHIRecipes are 10608 // updated before vectorising the epilogue loop. 10609 VPBasicBlock *Header = 10610 BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock(); 10611 for (VPRecipeBase &R : Header->phis()) { 10612 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10613 if (auto *Resume = MainILV.getReductionResumeValue( 10614 ReductionPhi->getRecurrenceDescriptor())) { 10615 VPValue *StartVal = new VPValue(Resume); 10616 BestEpiPlan.addExternalDef(StartVal); 10617 ReductionPhi->setOperand(0, StartVal); 10618 } 10619 } 10620 } 10621 10622 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10623 DT); 10624 ++LoopsEpilogueVectorized; 10625 10626 if (!MainILV.areSafetyChecksAdded()) 10627 DisableRuntimeUnroll = true; 10628 } else { 10629 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10630 &LVL, &CM, BFI, PSI, Checks); 10631 10632 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10633 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10634 ++LoopsVectorized; 10635 10636 // Add metadata to disable runtime unrolling a scalar loop when there 10637 // are no runtime checks about strides and memory. A scalar loop that is 10638 // rarely used is not worth unrolling. 10639 if (!LB.areSafetyChecksAdded()) 10640 DisableRuntimeUnroll = true; 10641 } 10642 // Report the vectorization decision. 10643 ORE->emit([&]() { 10644 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10645 L->getHeader()) 10646 << "vectorized loop (vectorization width: " 10647 << NV("VectorizationFactor", VF.Width) 10648 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10649 }); 10650 } 10651 10652 if (ORE->allowExtraAnalysis(LV_NAME)) 10653 checkMixedPrecision(L, ORE); 10654 } 10655 10656 Optional<MDNode *> RemainderLoopID = 10657 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10658 LLVMLoopVectorizeFollowupEpilogue}); 10659 if (RemainderLoopID.hasValue()) { 10660 L->setLoopID(RemainderLoopID.getValue()); 10661 } else { 10662 if (DisableRuntimeUnroll) 10663 AddRuntimeUnrollDisableMetaData(L); 10664 10665 // Mark the loop as already vectorized to avoid vectorizing again. 10666 Hints.setAlreadyVectorized(); 10667 } 10668 10669 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10670 return true; 10671 } 10672 10673 LoopVectorizeResult LoopVectorizePass::runImpl( 10674 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10675 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10676 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10677 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10678 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10679 SE = &SE_; 10680 LI = &LI_; 10681 TTI = &TTI_; 10682 DT = &DT_; 10683 BFI = &BFI_; 10684 TLI = TLI_; 10685 AA = &AA_; 10686 AC = &AC_; 10687 GetLAA = &GetLAA_; 10688 DB = &DB_; 10689 ORE = &ORE_; 10690 PSI = PSI_; 10691 10692 // Don't attempt if 10693 // 1. the target claims to have no vector registers, and 10694 // 2. interleaving won't help ILP. 
10695 // 10696 // The second condition is necessary because, even if the target has no 10697 // vector registers, loop vectorization may still enable scalar 10698 // interleaving. 10699 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10700 TTI->getMaxInterleaveFactor(1) < 2) 10701 return LoopVectorizeResult(false, false); 10702 10703 bool Changed = false, CFGChanged = false; 10704 10705 // The vectorizer requires loops to be in simplified form. 10706 // Since simplification may add new inner loops, it has to run before the 10707 // legality and profitability checks. This means running the loop vectorizer 10708 // will simplify all loops, regardless of whether anything end up being 10709 // vectorized. 10710 for (auto &L : *LI) 10711 Changed |= CFGChanged |= 10712 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10713 10714 // Build up a worklist of inner-loops to vectorize. This is necessary as 10715 // the act of vectorizing or partially unrolling a loop creates new loops 10716 // and can invalidate iterators across the loops. 10717 SmallVector<Loop *, 8> Worklist; 10718 10719 for (Loop *L : *LI) 10720 collectSupportedLoops(*L, LI, ORE, Worklist); 10721 10722 LoopsAnalyzed += Worklist.size(); 10723 10724 // Now walk the identified inner loops. 10725 while (!Worklist.empty()) { 10726 Loop *L = Worklist.pop_back_val(); 10727 10728 // For the inner loops we actually process, form LCSSA to simplify the 10729 // transform. 10730 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10731 10732 Changed |= CFGChanged |= processLoop(L); 10733 } 10734 10735 // Process each loop nest in the function. 10736 return LoopVectorizeResult(Changed, CFGChanged); 10737 } 10738 10739 PreservedAnalyses LoopVectorizePass::run(Function &F, 10740 FunctionAnalysisManager &AM) { 10741 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10742 auto &LI = AM.getResult<LoopAnalysis>(F); 10743 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10744 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10745 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10746 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10747 auto &AA = AM.getResult<AAManager>(F); 10748 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10749 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10750 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10751 10752 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10753 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10754 [&](Loop &L) -> const LoopAccessInfo & { 10755 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10756 TLI, TTI, nullptr, nullptr, nullptr}; 10757 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10758 }; 10759 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10760 ProfileSummaryInfo *PSI = 10761 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10762 LoopVectorizeResult Result = 10763 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10764 if (!Result.MadeAnyChange) 10765 return PreservedAnalyses::all(); 10766 PreservedAnalyses PA; 10767 10768 // We currently do not preserve loopinfo/dominator analyses with outer loop 10769 // vectorization. Until this is addressed, mark these analyses as preserved 10770 // only for non-VPlan-native path. 10771 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
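// In the default (inner-loop) path, LoopInfo and the DominatorTree are kept up
// to date during codegen, which is why they can be marked preserved here.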
10772 if (!EnableVPlanNativePath) { 10773 PA.preserve<LoopAnalysis>(); 10774 PA.preserve<DominatorTreeAnalysis>(); 10775 } 10776 10777 if (Result.MadeCFGChange) { 10778 // Making CFG changes likely means a loop got vectorized. Indicate that 10779 // extra simplification passes should be run. 10780 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only 10781 // be run if runtime checks have been added. 10782 AM.getResult<ShouldRunExtraVectorPasses>(F); 10783 PA.preserve<ShouldRunExtraVectorPasses>(); 10784 } else { 10785 PA.preserveSet<CFGAnalyses>(); 10786 } 10787 return PA; 10788 } 10789 10790 void LoopVectorizePass::printPipeline( 10791 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 10792 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline( 10793 OS, MapClassName2PassName); 10794 10795 OS << "<"; 10796 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;"; 10797 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;"; 10798 OS << ">"; 10799 } 10800