//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR. The vectorizer uses the
// TargetTransformInfo analysis to estimate the costs of instructions in order
// to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented by the
// SIMD vector width, and not by one. See the sketch at the end of this header
// for an illustration.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
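//
// As a rough illustration (a sketch only, not the literal output of this
// pass): with a vectorization factor of 4, a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + 1;
//
// is conceptually rewritten so that each iteration of the vector loop
// processes elements i..i+3 with vector instructions and increments the
// induction variable by 4, with a scalar epilogue (or a predicated tail)
// handling any remaining iterations.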
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars.
/// This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for the given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State, VPlan &Plan);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then it uses the class
  /// member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *VectorTripCount, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader,
                    VPlan &Plan);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
                               VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
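///
/// A simplified sketch of the loop structure this produces (the actual CFG
/// also contains the iteration-count checks and the SCEV/memory runtime-check
/// bypass blocks):
///
///   main vector loop (MainLoopVF, MainLoopUF)
///     |
///   vectorized epilogue loop (EpilogueVF, EpilogueUF)
///     |
///   scalar epilogue loop (remaining iterations)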
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark.
/// If \p I is passed it is an instruction that prevents vectorization.
/// Otherwise \p TheLoop is used for the location of the remark. \return the
/// remark object that can be streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a VPWidenRecipe
  // or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction &UnderlyingInstr = WidenRec->getIngredient();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() &&
            Legal->blockNeedsPredication(UnderlyingInstr.getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
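// Note: with this comparator, fixed-width VFs (isScalable() == false) sort
// before scalable VFs, and within each group VFs are ordered by their known
// minimum value, e.g. 2, 4, 8, ..., then vscale x 2, vscale x 4, ...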

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a number
/// of reasons. In this class we mainly attempt to predict the expected
/// speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on cost. This
  /// function makes cost-based decisions for Load/Store instructions and
  /// collects them in a map. This decision map is used for building the lists
  /// of loop-uniform and loop-scalar instructions. The calculated cost is
  /// saved with the widening decision in order to avoid redundant
  /// calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V,
                              ElementCount VF = ElementCount::getFixed(1)) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getLoadStoreType(V);
    Align Align = getLoadStoreAlignment(V);
    if (VF.isVector())
      Ty = VectorType::get(Ty, VF);
    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
           (SI && TTI.isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) const {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication when vectorizing \p I with vectorization factor \p VF. Such
  /// instructions include conditional stores and instructions that may divide
  /// by zero.
  bool isScalarWithPredication(Instruction *I, ElementCount VF) const;

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // \p VF is the vectorization factor that will be used to vectorize \p I.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I, ElementCount VF,
                        bool IsKnownUniform = false) {
    // When we know the load is uniform and the original scalar loop was not
    // predicated we don't need to mark it as a predicated instruction. Any
    // vectorised blocks created when tail-folding are something artificial we
    // have introduced and we know there is always at least one active lane.
    // That's why we call Legal->blockNeedsPredication here because it doesn't
    // query tail-folding.
    if (IsKnownUniform && isa<LoadInst>(I) &&
        !Legal->blockNeedsPredication(I->getParent()))
      return false;
    if (!blockNeedsPredicationForAnyReason(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I, VF);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
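  /// For example (illustrative): for an in-loop reduction such as
  ///   %sum = phi [ 0, %ph ], [ %sum.next, %latch ]
  ///   %t = add %sum, %a
  ///   %sum.next = add %t, %b
  /// the map would contain the entry %sum -> {%t, %sum.next}.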
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available, or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// Convenience function that returns the value of vscale_range iff
  /// vscale_range.min == vscale_range.max or otherwise returns the value
  /// returned by the corresponding TTI method.
  Optional<unsigned> getVScaleForTuning() const;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF,
                                       bool FoldTailByMasking);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. A vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not divisible by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup of the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
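  /// For example (illustrative): if a predicated udiv is fed by a single-use
  /// add, scalarizing the add together with the udiv can be cheaper than
  /// widening the add and then extracting its lanes inside the predicated
  /// block; the returned discount captures that cost difference.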
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. collectLoopScalars should only add non-uniform nodes
  /// to the list if they are used by a load/store instruction that is marked as
  /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
  /// VF values in the vectorized loop, each corresponding to an iteration of
  /// the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// All element types found in the loop.
  SmallPtrSet<Type *, 16> ElementTypesInLoop;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};
} // end namespace llvm

/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimation of their cost, and are un-linked from the existing IR. After
/// deciding to vectorize, the checks are moved back. If deciding not to
/// vectorize, the temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr, either no SCEV checks have been generated or they have been used.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr, either no memory runtime checks have been generated or
  /// they have been used.
  Value *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
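    // For illustration, when both kinds of checks are required the temporary
    // layout created here is roughly:
    //   preheader -> vector.scevcheck -> vector.memcheck -> loop header
    // until the new blocks are un-linked again below.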
    if (!UnionPred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &UnionPred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      auto DiffChecks = RtPtrChecking.getDiffChecks();
      if (DiffChecks) {
        MemRuntimeCheckCond = addDiffRuntimeChecks(
            MemCheckBlock->getTerminator(), L, *DiffChecks, MemCheckExp,
            [VF](IRBuilderBase &B, unsigned Bits) {
              return getRuntimeVF(B, B.getIntNTy(Bits), VF);
            },
            IC);
      } else {
        MemRuntimeCheckCond =
            addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                             RtPtrChecking.getChecks(), MemCheckExp);
      }
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary blocks with the checks and update various places
    // accordingly.
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }

  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
    if (!SCEVCheckCond)
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
      // Memory runtime check generation creates compares that use expanded
      // values. Remove them before running the SCEVExpanderCleaners.
      for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
        if (MemCheckExp.isInsertedInstruction(&I))
          continue;
        SE.forgetValue(&I);
        I.eraseFromParent();
      }
    }
    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

    if (SCEVCheckCond)
      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
  }

  /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
                             BasicBlock *LoopVectorPreHeader,
                             BasicBlock *LoopExitBlock) {
    if (!SCEVCheckCond)
      return nullptr;

    Value *Cond = SCEVCheckCond;
    // Mark the check as used, to prevent it from being removed during cleanup.
    SCEVCheckCond = nullptr;
    if (auto *C = dyn_cast<ConstantInt>(Cond))
      if (C->isZero())
        return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();

    BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Create new preheader for vector loop.
    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    DT->addNewBlock(SCEVCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);

    ReplaceInstWithInst(SCEVCheckBlock->getTerminator(),
                        BranchInst::Create(Bypass, LoopVectorPreHeader, Cond));
    return SCEVCheckBlock;
  }

  /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
                                   BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays
    // overlap.
    if (!MemRuntimeCheckCond)
      return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                MemCheckBlock);

    DT->addNewBlock(MemCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(MemCheckBlock, *LI);

    ReplaceInstWithInst(
        MemCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    // Mark the check as used, to prevent it from being removed during cleanup.
    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
  }
};

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
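// For example (illustrative), an outer loop annotated as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (i = 0; i < n; ++i)
//     for (j = 0; j < m; ++j)
//       A[i][j] += B[i][j];
// is considered for explicit outer-loop vectorization, whereas the same nest
// without the vectorize_width hint is not.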
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl
        .runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, GetLAA,
                 *ORE, PSI)
        .MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is relevant for FP induction variables.
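/// For example (illustrative), with VF = 4, Val = <%x, %x, %x, %x>,
/// StartIdx = 0 and Step = %s, the result is
/// <%x, %x + %s, %x + 2 * %s, %x + 3 * %s>.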
static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
                            Instruction::BinaryOps BinOp, ElementCount VF,
                            IRBuilderBase &Builder) {
  assert(VF.isVector() && "only vector VFs are supported");

  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from 0 to VF - 1.
  VectorType *InitVecValVTy = ValVTy;
  if (STy->isFloatingPointTy()) {
    Type *InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Splat the StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);

  if (STy->isIntegerTy()) {
    InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);

  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}

/// Compute scalar induction steps. \p ScalarIV is the scalar induction
/// variable on which to base the steps, \p Step is the size of the step.
static void buildScalarSteps(Value *ScalarIV, Value *Step,
                             const InductionDescriptor &ID, VPValue *Def,
                             VPTransformState &State) {
  IRBuilderBase &Builder = State.Builder;
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration.
  bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
  unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!FirstLaneOnly && State.VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, State.VF);
    UnitStepVec =
        Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
    SplatStep = Builder.CreateVectorSplat(State.VF, Step);
    SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
  }

  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);

    if (!FirstLaneOnly && State.VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(Def, Add, Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements so we do those below. This improves the code quality when
      // trying to extract the first element, for example.
    }

    if (ScalarIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);

    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
      auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
      State.set(Def, Add, VPIteration(Part, Lane));
    }
  }
}

// Generate code for the induction step. Note that induction steps are
// required to be loop-invariant.
static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
                              Instruction *InsertBefore,
                              Loop *OrigLoop = nullptr) {
  const DataLayout &DL = SE.getDataLayout();
  assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
         "Induction step should be loop invariant");
  if (auto *E = dyn_cast<SCEVUnknown>(Step))
    return E->getValue();

  SCEVExpander Exp(SE, DL, "induction");
  return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
}

/// Compute the transformed value of Index at offset StartValue using step
/// StepValue.
/// For integer induction, returns StartValue + Index * StepValue.
/// For pointer induction, returns StartValue[Index * StepValue].
/// FIXME: The newly created binary instructions should contain nsw/nuw
/// flags, which can be found from the original scalar operations.
static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
                                   Value *StartValue, Value *Step,
                                   const InductionDescriptor &ID) {
  assert(Index->getType()->getScalarType() == Step->getType() &&
         "Index scalar type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, an attempt to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is to use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  // We allow X to be a vector type, in which case Y will potentially be
  // splatted into a vector with the same element count.
  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
  };

  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for integer inductions yet");
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(Index, Step);
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<Constant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for FP inductions yet");
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *MulExp = B.CreateFMul(Step, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                         "induction");
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst,
      Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
  State.set(Def, VectorValue, Instance.Part);
}

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
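// For example (illustrative): a factor-2 load group in which only member 0 is
// accessed, e.g.
//   for (i = 0; i < N; i += 2)
//     sum += A[i];   // A[i+1] is a gap
// needs either a mask that disables the gap lanes of the wide load, or a
// scalar epilogue that peels off the final accesses.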
2533 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2534 // If an override option has been passed in for interleaved accesses, use it. 2535 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2536 return EnableMaskedInterleavedMemAccesses; 2537 2538 return TTI.enableMaskedInterleavedAccessVectorization(); 2539 } 2540 2541 // Try to vectorize the interleave group that \p Instr belongs to. 2542 // 2543 // E.g. Translate following interleaved load group (factor = 3): 2544 // for (i = 0; i < N; i+=3) { 2545 // R = Pic[i]; // Member of index 0 2546 // G = Pic[i+1]; // Member of index 1 2547 // B = Pic[i+2]; // Member of index 2 2548 // ... // do something to R, G, B 2549 // } 2550 // To: 2551 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2552 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2553 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2554 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2555 // 2556 // Or translate following interleaved store group (factor = 3): 2557 // for (i = 0; i < N; i+=3) { 2558 // ... do something to R, G, B 2559 // Pic[i] = R; // Member of index 0 2560 // Pic[i+1] = G; // Member of index 1 2561 // Pic[i+2] = B; // Member of index 2 2562 // } 2563 // To: 2564 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2565 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2566 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2567 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2568 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2569 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2570 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2571 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2572 VPValue *BlockInMask) { 2573 Instruction *Instr = Group->getInsertPos(); 2574 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2575 2576 // Prepare for the vector type of the interleaved load/store. 2577 Type *ScalarTy = getLoadStoreType(Instr); 2578 unsigned InterleaveFactor = Group->getFactor(); 2579 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2580 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2581 2582 // Prepare for the new pointers. 2583 SmallVector<Value *, 2> AddrParts; 2584 unsigned Index = Group->getIndex(Instr); 2585 2586 // TODO: extend the masked interleaved-group support to reversed access. 2587 assert((!BlockInMask || !Group->isReverse()) && 2588 "Reversed masked interleave-group not supported."); 2589 2590 // If the group is reverse, adjust the index to refer to the last vector lane 2591 // instead of the first. We adjust the index from the first vector lane, 2592 // rather than directly getting the pointer for lane VF - 1, because the 2593 // pointer operand of the interleaved access is supposed to be uniform. For 2594 // uniform instructions, we're only required to generate a value for the 2595 // first vector lane in each unroll iteration. 2596 if (Group->isReverse()) 2597 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2598 2599 for (unsigned Part = 0; Part < UF; Part++) { 2600 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2601 setDebugLocFromInst(AddrPart); 2602 2603 // Notice current instruction could be any index. Need to adjust the address 2604 // to the member of index 0. 2605 // 2606 // E.g. 
a = A[i+1]; // Member of index 1 (Current instruction) 2607 // b = A[i]; // Member of index 0 2608 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2609 // 2610 // E.g. A[i+1] = a; // Member of index 1 2611 // A[i] = b; // Member of index 0 2612 // A[i+2] = c; // Member of index 2 (Current instruction) 2613 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2614 2615 bool InBounds = false; 2616 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2617 InBounds = gep->isInBounds(); 2618 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2619 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2620 2621 // Cast to the vector pointer type. 2622 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2623 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2624 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2625 } 2626 2627 setDebugLocFromInst(Instr); 2628 Value *PoisonVec = PoisonValue::get(VecTy); 2629 2630 Value *MaskForGaps = nullptr; 2631 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2632 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2633 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2634 } 2635 2636 // Vectorize the interleaved load group. 2637 if (isa<LoadInst>(Instr)) { 2638 // For each unroll part, create a wide load for the group. 2639 SmallVector<Value *, 2> NewLoads; 2640 for (unsigned Part = 0; Part < UF; Part++) { 2641 Instruction *NewLoad; 2642 if (BlockInMask || MaskForGaps) { 2643 assert(useMaskedInterleavedAccesses(*TTI) && 2644 "masked interleaved groups are not allowed."); 2645 Value *GroupMask = MaskForGaps; 2646 if (BlockInMask) { 2647 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2648 Value *ShuffledMask = Builder.CreateShuffleVector( 2649 BlockInMaskPart, 2650 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2651 "interleaved.mask"); 2652 GroupMask = MaskForGaps 2653 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2654 MaskForGaps) 2655 : ShuffledMask; 2656 } 2657 NewLoad = 2658 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2659 GroupMask, PoisonVec, "wide.masked.vec"); 2660 } 2661 else 2662 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2663 Group->getAlign(), "wide.vec"); 2664 Group->addMetadata(NewLoad); 2665 NewLoads.push_back(NewLoad); 2666 } 2667 2668 // For each member in the group, shuffle out the appropriate data from the 2669 // wide loads. 2670 unsigned J = 0; 2671 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2672 Instruction *Member = Group->getMember(I); 2673 2674 // Skip the gaps in the group. 2675 if (!Member) 2676 continue; 2677 2678 auto StrideMask = 2679 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2680 for (unsigned Part = 0; Part < UF; Part++) { 2681 Value *StridedVec = Builder.CreateShuffleVector( 2682 NewLoads[Part], StrideMask, "strided.vec"); 2683 2684 // If this member has different type, cast the result type. 
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");

        State.set(VPDefs[J], StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The subvector type for the current instruction.
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
  assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
         "masked interleaved groups are not allowed.");
  assert((!MaskForGaps || !VF.isScalable()) &&
         "masking gaps for scalable vectors is not yet supported.");
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Failed to get a member from an interleaved store group");
      Instruction *Member = Group->getMember(i);

      // Skip the gaps in the group.
      if (!Member) {
        Value *Undef = PoisonValue::get(SubVT);
        StoredVecs.push_back(Undef);
        continue;
      }

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask || MaskForGaps) {
      Value *GroupMask = MaskForGaps;
      if (BlockInMask) {
        Value *BlockInMaskPart = State.get(BlockInMask, Part);
        Value *ShuffledMask = Builder.CreateShuffleVector(
            BlockInMaskPart,
            createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
            "interleaved.mask");
        GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
                                                      ShuffledMask, MaskForGaps)
                                : ShuffledMask;
      }
      NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
                                                Group->getAlign(), GroupMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               VPReplicateRecipe *RepRecipe,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
  if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
    Cloned->dropPoisonGeneratingFlags();

  if (Instr->getDebugLoc())
    setDebugLocFromInst(Instr);

  // Replace the operands of the cloned instructions with their scalar
  // equivalents in the new loop.
  for (auto &I : enumerate(RepRecipe->operands())) {
    auto InputInstance = Instance;
    VPValue *Operand = I.value();
    VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
    if (OperandR && OperandR->isUniform())
      InputInstance.Lane = VPLane::getFirstLane();
    Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  State.Builder.Insert(Cloned);

  State.set(RepRecipe, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<AssumeInst>(Cloned))
    AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
  if (TripCount)
    return TripCount;

  assert(InsertBlock);
  IRBuilder<> Builder(InsertBlock->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count is that the induction
  // variable was signed and as such will not overflow. In such a case
  // truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = InsertBlock->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
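  // A minimal illustrative sketch (not taken from any particular test): for a
  // source loop `for (i = 0; i < n; ++i)`, SCEV computes the backedge-taken
  // count as n - 1, so the trip count expanded below is (n - 1) + 1 == n.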
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                InsertBlock->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    InsertBlock->getTerminator());

  return TripCount;
}

Value *
InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(InsertBlock);
  IRBuilder<> Builder(InsertBlock->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  Value *Step = createStepForVF(Builder, Ty, VF, UF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's ok if this addition
  // overflows: the vector induction variable will eventually wrap to zero
  // given that it starts at zero and its Step is a power of two; the loop
  // will then exit, with the last early-exit vector comparison also producing
  // all-true. For scalable vectors the VF is not guaranteed to be a power of
  // 2, but this is accounted for in emitIterationCountCheck that adds an
  // overflow check.
  if (Cost->foldTailByMasking()) {
    assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
           "VF*UF must be a power of 2 when folding tail by masking");
    Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
    TC = Builder.CreateAdd(
        TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
  }

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step, otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // There are cases where we *must* run at least one iteration in the
  // remainder loop. See the cost model for when this can happen. If the step
  // evenly divides the trip count, we set the remainder to be equal to the
  // step. If the step does not evenly divide the trip count, no adjustment is
  // necessary since there will already be scalar iterations. Note that the
  // minimum iterations check ensures that N >= Step.
  if (Cost->requiresScalarEpilogue(VF)) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                                   const DataLayout &DL) {
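  // Illustrative sketch of the intent (example types invented here, assuming
  // 64-bit pointers): a direct bitcast handles <4 x i32> -> <4 x float>,
  // while <4 x i64*> -> <4 x double> needs the two-step lowering below, i.e.
  // a ptrtoint to <4 x i64> first and a bitcast to <4 x double> second.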
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

  // Do a direct cast if element types are castable.
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step cast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
                                            : ICmpInst::ICMP_ULT;

  // If the tail is to be folded, the vector loop takes care of all iterations.
  Type *CountTy = Count->getType();
  Value *CheckMinIters = Builder.getFalse();
  Value *Step = createStepForVF(Builder, CountTy, VF, UF);
  if (!Cost->foldTailByMasking())
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  else if (VF.isScalable()) {
    // vscale is not necessarily a power-of-2, which means we cannot guarantee
    // an overflow to zero when updating induction variables and so an
    // additional overflow check is required before entering the vector loop.

    // Get the maximum unsigned value for the type.
    Value *MaxUIntTripCount =
        ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
    Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);

    // Don't execute the vector loop if (UMax - n) < (VF * UF).
    CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, Step);
  }
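  // A small worked example of the non-folded check above (VF and UF values
  // chosen purely for illustration): with fixed VF = 4 and UF = 2, Step is 8,
  // so the branch to the scalar loop is taken when n < 8 (ICMP_ULT), or when
  // n <= 8 (ICMP_ULE) if a scalar epilogue is required.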
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update dominator for Bypass & LoopExit (if needed).
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
      RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
  if (!SCEVCheckBlock)
    return nullptr;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    if (!Cost->requiresScalarEpilogue(VF))
      // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
      // dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
  return SCEVCheckBlock;
}

BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
  // The VPlan-native path does not currently do any analysis for runtime
  // checks.
  if (EnableVPlanNativePath)
    return nullptr;

  BasicBlock *const MemCheckBlock =
      RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);

  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        OrigLoop->getStartLoc(),
                                        OrigLoop->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;
  // Only use noalias metadata when using memory checks which guarantee no
  // overlap across all iterations.
  if (!Legal->getLAI()->getRuntimePointerChecking()->getDiffChecks()) {
    // We currently don't use LoopVersioning for the actual loop cloning but we
    // still use it to add the noalias metadata.
    LVer = std::make_unique<LoopVersioning>(
        *Legal->getLAI(),
        Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
        DT, PSE.getSE());
    LVer->prepareNoAliasMetadata();
  }
  return MemCheckBlock;
}

void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the
  //    remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
      BranchInst::Create(LoopScalarPreHeader) :
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
                         Builder.getTrue());
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // Update dominator for loop exit. During skeleton creation, only the vector
  // pre-header and the middle block are created. The vector loop is entirely
  // created during VPlan execution.
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
}

void InnerLoopVectorizer::createInductionResumeValues(
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");

  Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
  assert(VectorTripCount && "Expected valid arguments");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of the PHI nodes depend on the counter of the last
  // iteration of the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
  Instruction *OldInduction = Legal->getPrimaryInduction();
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy the original phi's DebugLoc over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(LoopVectorPreHeader->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *VTC = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.vtc");
      Value *Step =
          CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
      EndValue = emitTransformedIndex(B, VTC, II.getStartValue(), Step, II);
      EndValue->setName("ind.end");

      // Compute the end value for the additional bypass (if applicable).
      if (AdditionalBypass.first) {
        B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
        CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
                                         StepType, true);
        Value *Step =
            CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
        VTC =
            B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.vtc");
        EndValueFromAdditionalBypass =
            emitTransformedIndex(B, VTC, II.getStartValue(), Step, II);
        EndValueFromAdditionalBypass->setName("ind.end");
      }
    }
    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);

    // Fix the scalar body counter (PHI node).
    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);

    if (AdditionalBypass.first)
      BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
                                            EndValueFromAdditionalBypass);

    OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  }
}

BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) {
  // The trip counts should be cached by now.
  Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop. Three cases:
  // 1) If we require a scalar epilogue, there is no conditional branch as
  //    we unconditionally branch to the scalar preheader. Do nothing.
  // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
  //    Thus if the tail is to be folded, we know we don't need to run the
  //    remainder and we can use the previous value for the condition (true).
  // 3) Otherwise, construct a runtime check.
  if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
    Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                        Count, VectorTripCount, "cmp.n",
                                        LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch terminator
    // instead of the corresponding compare because they may have ended up
    // with different line numbers and we want to avoid awkward line stepping
    // while debugging. E.g. if the compare has got a line number inside the
    // loop.
    CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
    cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
  }

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
#endif

  return LoopVectorPreHeader;
}

std::pair<BasicBlock *, Value *>
InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
      /   |
     /    v
    |    [ ] <-- vector loop bypass (may consist of multiple blocks).
    |  /  |
    | /   v
    ||   [ ]     <-- vector pre header.
    |/    |
    |     v
    |    [  ] \
    |    [  ]_|   <-- vector loop (created during VPlan execution).
    |     |
    |     v
    \   -[ ]   <--- middle-block.
     \/   |
     /\   v
    | ->[ ]     <--- new preheader.
    |    |
 (opt)   v      <-- edge from middle to exit iff epilogue is not required.
    |   [ ] \
    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
     \   |
      \  v
       >[ ]     <-- exit block(s).
   ...
   */

  // Get the metadata of the original loop before it gets modified.
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  // Workaround! Compute the trip count of the original loop and cache it
  // before we start modifying the CFG. This code has a systemic problem
  // wherein it tries to run analysis over partially constructed IR; this is
  // wrong, and not simply for SCEV. The trip count of the original loop
  // simply happens to be prone to hitting this in practice. In theory, we
  // can hit the same issue for any SCEV, or ValueTracking query done during
  // mutation. See PR49900.
  getOrCreateTripCount(OrigLoop->getLoopPreheader());

  // Create an empty vector loop, and prepare basic blocks for the runtime
  // checks.
  createVectorLoopSkeleton("");

  // Now, compare the new count to zero. If it is zero skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitIterationCountCheck(LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(LoopScalarPreHeader);
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(LoopScalarPreHeader);

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues();

  return {completeLoopSkeleton(OrigLoopID), nullptr};
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *VectorTripCount, Value *EndValue,
                                       BasicBlock *MiddleBlock,
                                       BasicBlock *VectorHeader, VPlan &Plan) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
          VectorTripCount, ConstantInt::get(VectorTripCount->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");

      Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
                                    VectorHeader->getTerminator());
      Value *Escape =
          emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each-other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1) {
      PHI->addIncoming(I.second, MiddleBlock);
      Plan.removeLiveOut(PHI);
    }
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (Instruction &In : llvm::make_early_inc_range(*BB)) {
    if (!CSEDenseMapInfo::canHandle(&In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(&In)) {
      In.replaceAllUsesWith(V);
      In.eraseFromParent();
      continue;
    }

    CSEMap[&In] = &In;
  }
}

InstructionCost
LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
                                              bool &NeedToScalarize) const {
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->args())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of a scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  InstructionCost ScalarCallCost =
      TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the costs of unpacking the argument values for the scalar calls
  // and packing the return values into a vector.
  InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);

  InstructionCost Cost =
      ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently
  // found cost is the cost we need to return.
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
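  // A small worked example with invented costs: at VF = 4, if ScalarCallCost
  // is 10 and ScalarizationCost is 12, the scalarized cost is 4*10 + 12 = 52,
  // so a vector library call costing, say, 20 wins and clears NeedToScalarize.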
  InstructionCost VectorCallCost =
      TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    Cost = VectorCallCost;
  }
  return Cost;
}

static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
  if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
    return Elt;
  return VectorType::get(Elt, VF);
}

InstructionCost
LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                   ElementCount VF) const {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");
  Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<const Value *> Arguments(CI->args());
  FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  SmallVector<Type *> ParamTys;
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

  IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
                                    dyn_cast<IntrinsicInst>(CI));
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      auto *TruncatedTy = VectorType::get(
          ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));

        // Any wrapping introduced by shrinking this operation shouldn't be
        // considered undefined behavior. So, we can't unconditionally copy
        // arithmetic wrapping flags to NewI.
        cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 =
            cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 =
            cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
      } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements =
            cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements =
            cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        // If we don't know what to do, be conservative and don't do anything.
        continue;
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      State.reset(Def, Res, Part);
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
                                            VPlan &Plan) {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF.isVector())
    truncateToMinimalBitwidths(State);

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs(State);
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs(State);

  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  VPBasicBlock *LatchVPBB = Plan.getVectorLoopRegion()->getExitingBasicBlock();
  Loop *VectorLoop = LI->getLoopFor(State.CFG.VPBB2IRBB[LatchVPBB]);
  if (Cost->requiresScalarEpilogue(VF)) {
    // No edge from the middle block to the unique exit block has been
    // inserted and there is nothing to fix from the vector loop; phis should
    // have incoming values from the scalar loop only.
    Plan.clearLiveOuts();
  } else {
    // If we inserted an edge from the middle block to the unique exit block,
    // update uses outside the loop (phis) to account for the newly inserted
    // edge.

    // Fix up external users of the induction variables.
    for (auto &Entry : Legal->getInductionVars())
      fixupIVUsers(Entry.first, Entry.second,
                   getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
                   IVEndValues[Entry.first], LoopMiddleBlock,
                   VectorLoop->getHeader(), Plan);
  }

  // Fix LCSSA phis not already fixed earlier. Extracts may need to be
  // generated in the exit block, so update the builder.
  State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHI());
  for (auto &KV : Plan.getLiveOuts())
    KV.second->fixPhi(Plan, State);

  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(VectorLoop->getHeader());

  // Set/update profile weights for the vector and remainder loops as the
  // original loop iterations are now distributed among them. Note that the
  // original loop, represented by LoopScalarBody, becomes the remainder loop
  // after vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
                               LI->getLoopFor(LoopScalarBody),
                               VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi
  // nodes. Phi nodes have cycles, so we need to vectorize them in two stages.
  // This is stage #2: We now need to fix the recurrences by adding incoming
  // edges to the currently empty PHI nodes. At this point every instruction
  // in the original loop is widened to a vector form so we can use them to
  // construct the incoming edges.
  VPBasicBlock *Header =
      State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(
    VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After execution completes the vector loop, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  VPValue *PreviousDef = PhiR->getBackedgeValue();
  Value *Incoming = State.get(PreviousDef, UF - 1);
  auto *ExtractForScalar = Incoming;
  auto *IdxTy = Builder.getInt32Ty();
  if (VF.isVector()) {
    auto *One = ConstantInt::get(IdxTy, 1);
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
                                                    "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the
  // LoopMiddleBlock, when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector()) {
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Idx, "vector.recur.extract.for.phi");
  } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will
  // need either the last value of the scalar recurrence or the last value of
  // the vector recurrence we extracted in the middle block. Since the loop is
  // in LCSSA form, we just need to find all the phi nodes for the original
  // scalar recurrence in the exit block, and then add an edge for the middle
  // block. Note that LCSSA does not imply single entry when the original
  // scalar loop had multiple exiting edges (as we always run the last
  // iteration in the scalar epilogue); in that case, there is no edge from
  // middle to exit and thus no phis which need to be updated.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) {
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
        State.Plan->removeLiveOut(&LCSSAPhi);
      }
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(PhiR, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(LoopExitInst);

  Type *PhiTy = OrigPhi->getType();

  VPBasicBlock *LatchVPBB =
      PhiR->getParent()->getEnclosingLoopRegion()->getExitingBasicBlock();
  BasicBlock *VectorLoopLatch = State.CFG.VPBB2IRBB[LatchVPBB];
  // If the tail is folded by masking, the vector value to leave the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, instead of the former. For an inloop reduction the
  // reduction will already be predicated, and does not need to be handled
  // here.
  if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at
      // no extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
      if (PreferPredicatedReductionSelect ||
          TTI->preferPredicatedReductionSelect(
              RdxDesc.getOpcode(), PhiTy,
              TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
        VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
      }
    }
  }

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
    assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = State.get(LoopExitInstDef, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                        : Builder.CreateZExt(Trunc, VecTy);
      for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
        if (U != Trunc) {
          U->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      State.reset(LoopExitInstDef, RdxParts[Part], Part);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
  unsigned Op = RecurrenceDescriptor::getOpcode(RK);

  // The middle block terminator has already been assigned a DebugLoc here
  // (the OrigLoop's single latch terminator). We want the whole middle block
  // to appear to execute on this line because: (a) it is all compiler
  // generated, (b) these instructions are always executed after evaluating
  // the latch conditional branch, and (c) other passes may add new
  // predecessors which terminate on this line. This is the easiest way to
  // ensure we don't accidentally cause an extra step back into the loop while
  // debugging.
  setDebugLocFromInst(LoopMiddleBlock->getTerminator());
  if (PhiR->isOrdered())
    ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
  else {
    // Floating-point operations should have some FMF to enable the reduction.
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
    for (unsigned Part = 1; Part < UF; ++Part) {
      Value *RdxPart = State.get(LoopExitInstDef, Part);
      if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
        ReducedPartRdx = Builder.CreateBinOp(
            (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
      } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
        ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
                                           ReducedPartRdx, RdxPart);
      else
        ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
    }
  }

  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
  if (VF.isVector() && !PhiR->isInLoop()) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  PHINode *ResumePhi =
      dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());

  // If we are fixing reductions in the epilogue loop then we should already
  // have created a bc.merge.rdx Phi after the main vector body. Ensure that
  // we carry over the incoming values correctly.
4017   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4018     if (Incoming == LoopMiddleBlock)
4019       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4020     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4021       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4022                               Incoming);
4023     else
4024       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4025   }
4026
4027   // Set the resume value for this reduction.
4028   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4029
4030   // If there were stores of the reduction value to a uniform memory address
4031   // inside the loop, create the final store here.
4032   if (StoreInst *SI = RdxDesc.IntermediateStore) {
4033     StoreInst *NewSI =
4034         Builder.CreateStore(ReducedPartRdx, SI->getPointerOperand());
4035     propagateMetadata(NewSI, SI);
4036
4037     // If the reduction value is used in other places,
4038     // then let the code below create PHIs for that.
4039   }
4040
4041   // Now, we need to fix the users of the reduction variable
4042   // inside and outside of the scalar remainder loop.
4043
4044   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4045   // in the exit blocks. See comment on analogous loop in
4046   // fixFirstOrderRecurrence for a more complete explanation of the logic.
4047   if (!Cost->requiresScalarEpilogue(VF))
4048     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4049       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) {
4050         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4051         State.Plan->removeLiveOut(&LCSSAPhi);
4052       }
4053
4054   // Fix the scalar loop reduction variable with the incoming reduction sum
4055   // from the vector body and from the backedge value.
4056   int IncomingEdgeBlockIdx =
4057       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4058   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4059   // Pick the other block.
4060   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4061   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4062   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4063 }
4064
4065 void InnerLoopVectorizer::clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
4066                                                   VPTransformState &State) {
4067   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4068   RecurKind RK = RdxDesc.getRecurrenceKind();
4069   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4070     return;
4071
4072   SmallVector<VPValue *, 8> Worklist;
4073   SmallPtrSet<VPValue *, 8> Visited;
4074   Worklist.push_back(PhiR);
4075   Visited.insert(PhiR);
4076
4077   while (!Worklist.empty()) {
4078     VPValue *Cur = Worklist.pop_back_val();
4079     for (unsigned Part = 0; Part < UF; ++Part) {
4080       Value *V = State.get(Cur, Part);
4081       if (!isa<OverflowingBinaryOperator>(V))
4082         break;
4083       cast<Instruction>(V)->dropPoisonGeneratingFlags();
4084     }
4085
4086     for (VPUser *U : Cur->users()) {
4087       auto *UserRecipe = dyn_cast<VPRecipeBase>(U);
4088       if (!UserRecipe)
4089         continue;
4090       for (VPValue *V : UserRecipe->definedValues())
4091         if (Visited.insert(V).second)
4092           Worklist.push_back(V);
4093     }
4094   }
4095 }
4096
4097 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4098   // The basic block and loop containing the predicated instruction.
4099   auto *PredBB = PredInst->getParent();
4100   auto *VectorLoop = LI->getLoopFor(PredBB);
4101
4102   // Initialize a worklist with the operands of the predicated instruction.
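  // Illustrative note (not from the original source): if a conditional
  // scalarized store was given its own predicated block, the scalar GEP that
  // computes its address can be sunk into that block here, provided all of
  // the GEP's uses are inside that block, so the address is only computed
  // when the predicate is true.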
4103   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4104
4105   // Holds instructions that we need to analyze again. An instruction may be
4106   // reanalyzed if we don't yet know if we can sink it or not.
4107   SmallVector<Instruction *, 8> InstsToReanalyze;
4108
4109   // Returns true if a given use occurs in the predicated block. Phi nodes use
4110   // their operands in their corresponding predecessor blocks.
4111   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4112     auto *I = cast<Instruction>(U.getUser());
4113     BasicBlock *BB = I->getParent();
4114     if (auto *Phi = dyn_cast<PHINode>(I))
4115       BB = Phi->getIncomingBlock(
4116           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4117     return BB == PredBB;
4118   };
4119
4120   // Iteratively sink the scalarized operands of the predicated instruction
4121   // into the block we created for it. When an instruction is sunk, its
4122   // operands are then added to the worklist. The algorithm ends when one pass
4123   // through the worklist doesn't sink a single instruction.
4124   bool Changed;
4125   do {
4126     // Add the instructions that need to be reanalyzed to the worklist, and
4127     // reset the changed indicator.
4128     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4129     InstsToReanalyze.clear();
4130     Changed = false;
4131
4132     while (!Worklist.empty()) {
4133       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4134
4135       // We can't sink an instruction if it is a phi node, is not in the loop,
4136       // or may have side effects.
4137       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4138           I->mayHaveSideEffects())
4139         continue;
4140
4141       // If the instruction is already in PredBB, check if we can sink its
4142       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4143       // sinking the scalar instruction I, hence it appears in PredBB; but it
4144       // may have failed to sink I's operands (recursively), which we try
4145       // (again) here.
4146       if (I->getParent() == PredBB) {
4147         Worklist.insert(I->op_begin(), I->op_end());
4148         continue;
4149       }
4150
4151       // It's legal to sink the instruction if all its uses occur in the
4152       // predicated block. Otherwise, there's nothing to do yet, and we may
4153       // need to reanalyze the instruction.
4154       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4155         InstsToReanalyze.push_back(I);
4156         continue;
4157       }
4158
4159       // Move the instruction to the beginning of the predicated block, and add
4160       // its operands to the worklist.
4161       I->moveBefore(&*PredBB->getFirstInsertionPt());
4162       Worklist.insert(I->op_begin(), I->op_end());
4163
4164       // The sinking may have enabled other instructions to be sunk, so we will
4165       // need to iterate.
4166       Changed = true;
4167     }
4168   } while (Changed);
4169 }
4170
4171 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4172   for (PHINode *OrigPhi : OrigPHIsToFix) {
4173     VPWidenPHIRecipe *VPPhi =
4174         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4175     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4176     // Make sure the builder has a valid insert point.
4177 Builder.SetInsertPoint(NewPhi); 4178 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4179 VPValue *Inc = VPPhi->getIncomingValue(i); 4180 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4181 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4182 } 4183 } 4184 } 4185 4186 bool InnerLoopVectorizer::useOrderedReductions( 4187 const RecurrenceDescriptor &RdxDesc) { 4188 return Cost->useOrderedReductions(RdxDesc); 4189 } 4190 4191 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4192 VPWidenPHIRecipe *PhiR, 4193 VPTransformState &State) { 4194 assert(EnableVPlanNativePath && 4195 "Non-native vplans are not expected to have VPWidenPHIRecipes."); 4196 // Currently we enter here in the VPlan-native path for non-induction 4197 // PHIs where all control flow is uniform. We simply widen these PHIs. 4198 // Create a vector phi with no operands - the vector phi operands will be 4199 // set at the end of vector code generation. 4200 Type *VecTy = (State.VF.isScalar()) 4201 ? PN->getType() 4202 : VectorType::get(PN->getType(), State.VF); 4203 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4204 State.set(PhiR, VecPhi, 0); 4205 OrigPHIsToFix.push_back(cast<PHINode>(PN)); 4206 } 4207 4208 /// A helper function for checking whether an integer division-related 4209 /// instruction may divide by zero (in which case it must be predicated if 4210 /// executed conditionally in the scalar code). 4211 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4212 /// Non-zero divisors that are non compile-time constants will not be 4213 /// converted into multiplication, so we will still end up scalarizing 4214 /// the division, but can do so w/o predication. 4215 static bool mayDivideByZero(Instruction &I) { 4216 assert((I.getOpcode() == Instruction::UDiv || 4217 I.getOpcode() == Instruction::SDiv || 4218 I.getOpcode() == Instruction::URem || 4219 I.getOpcode() == Instruction::SRem) && 4220 "Unexpected instruction"); 4221 Value *Divisor = I.getOperand(1); 4222 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4223 return !CInt || CInt->isZero(); 4224 } 4225 4226 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4227 VPUser &ArgOperands, 4228 VPTransformState &State) { 4229 assert(!isa<DbgInfoIntrinsic>(I) && 4230 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4231 setDebugLocFromInst(&I); 4232 4233 Module *M = I.getParent()->getParent()->getParent(); 4234 auto *CI = cast<CallInst>(&I); 4235 4236 SmallVector<Type *, 4> Tys; 4237 for (Value *ArgOperand : CI->args()) 4238 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4239 4240 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4241 4242 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4243 // version of the instruction. 4244 // Is it beneficial to perform intrinsic call compared to lib call? 4245 bool NeedToScalarize = false; 4246 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4247 InstructionCost IntrinsicCost = ID ? 
Cost->getVectorIntrinsicCost(CI, VF) : 0; 4248 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4249 assert((UseVectorIntrinsic || !NeedToScalarize) && 4250 "Instruction should be scalarized elsewhere."); 4251 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4252 "Either the intrinsic cost or vector call cost must be valid"); 4253 4254 for (unsigned Part = 0; Part < UF; ++Part) { 4255 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4256 SmallVector<Value *, 4> Args; 4257 for (auto &I : enumerate(ArgOperands.operands())) { 4258 // Some intrinsics have a scalar argument - don't replace it with a 4259 // vector. 4260 Value *Arg; 4261 if (!UseVectorIntrinsic || 4262 !isVectorIntrinsicWithScalarOpAtArg(ID, I.index())) 4263 Arg = State.get(I.value(), Part); 4264 else 4265 Arg = State.get(I.value(), VPIteration(0, 0)); 4266 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I.index())) 4267 TysForDecl.push_back(Arg->getType()); 4268 Args.push_back(Arg); 4269 } 4270 4271 Function *VectorF; 4272 if (UseVectorIntrinsic) { 4273 // Use vector version of the intrinsic. 4274 if (VF.isVector()) 4275 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4276 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4277 assert(VectorF && "Can't retrieve vector intrinsic."); 4278 } else { 4279 // Use vector version of the function call. 4280 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4281 #ifndef NDEBUG 4282 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4283 "Can't create vector function."); 4284 #endif 4285 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4286 } 4287 SmallVector<OperandBundleDef, 1> OpBundles; 4288 CI->getOperandBundlesAsDefs(OpBundles); 4289 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4290 4291 if (isa<FPMathOperator>(V)) 4292 V->copyFastMathFlags(CI); 4293 4294 State.set(Def, V, Part); 4295 addMetadata(V, &I); 4296 } 4297 } 4298 4299 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4300 // We should not collect Scalars more than once per VF. Right now, this 4301 // function is called from collectUniformsAndScalars(), which already does 4302 // this check. Collecting Scalars for VF=1 does not make any sense. 4303 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4304 "This function should not be visited twice for the same VF"); 4305 4306 // This avoids any chances of creating a REPLICATE recipe during planning 4307 // since that would result in generation of scalarized code during execution, 4308 // which is not supported for scalable vectors. 4309 if (VF.isScalable()) { 4310 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4311 return; 4312 } 4313 4314 SmallSetVector<Instruction *, 8> Worklist; 4315 4316 // These sets are used to seed the analysis with pointers used by memory 4317 // accesses that will remain scalar. 4318 SmallSetVector<Instruction *, 8> ScalarPtrs; 4319 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4320 auto *Latch = TheLoop->getLoopLatch(); 4321 4322 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4323 // The pointer operands of loads and stores will be scalar as long as the 4324 // memory access is not a gather or scatter operation. The value operand of a 4325 // store will remain scalar if the store is scalarized. 
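  // Illustrative example (not taken from the source): for a consecutive
  // access such as "store i32 %v, i32* %gep" that is widened, only the
  // first-lane address is needed, so the use of %gep is a scalar use; had
  // the access been widened as a scatter, a whole vector of pointers would
  // be needed and the use would not be scalar.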
4326 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4327 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4328 assert(WideningDecision != CM_Unknown && 4329 "Widening decision should be ready at this moment"); 4330 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4331 if (Ptr == Store->getValueOperand()) 4332 return WideningDecision == CM_Scalarize; 4333 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4334 "Ptr is neither a value or pointer operand"); 4335 return WideningDecision != CM_GatherScatter; 4336 }; 4337 4338 // A helper that returns true if the given value is a bitcast or 4339 // getelementptr instruction contained in the loop. 4340 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4341 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4342 isa<GetElementPtrInst>(V)) && 4343 !TheLoop->isLoopInvariant(V); 4344 }; 4345 4346 // A helper that evaluates a memory access's use of a pointer. If the use will 4347 // be a scalar use and the pointer is only used by memory accesses, we place 4348 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4349 // PossibleNonScalarPtrs. 4350 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4351 // We only care about bitcast and getelementptr instructions contained in 4352 // the loop. 4353 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4354 return; 4355 4356 // If the pointer has already been identified as scalar (e.g., if it was 4357 // also identified as uniform), there's nothing to do. 4358 auto *I = cast<Instruction>(Ptr); 4359 if (Worklist.count(I)) 4360 return; 4361 4362 // If the use of the pointer will be a scalar use, and all users of the 4363 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4364 // place the pointer in PossibleNonScalarPtrs. 4365 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4366 return isa<LoadInst>(U) || isa<StoreInst>(U); 4367 })) 4368 ScalarPtrs.insert(I); 4369 else 4370 PossibleNonScalarPtrs.insert(I); 4371 }; 4372 4373 // We seed the scalars analysis with three classes of instructions: (1) 4374 // instructions marked uniform-after-vectorization and (2) bitcast, 4375 // getelementptr and (pointer) phi instructions used by memory accesses 4376 // requiring a scalar use. 4377 // 4378 // (1) Add to the worklist all instructions that have been identified as 4379 // uniform-after-vectorization. 4380 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4381 4382 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4383 // memory accesses requiring a scalar use. The pointer operands of loads and 4384 // stores will be scalar as long as the memory accesses is not a gather or 4385 // scatter operation. The value operand of a store will remain scalar if the 4386 // store is scalarized. 4387 for (auto *BB : TheLoop->blocks()) 4388 for (auto &I : *BB) { 4389 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4390 evaluatePtrUse(Load, Load->getPointerOperand()); 4391 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4392 evaluatePtrUse(Store, Store->getPointerOperand()); 4393 evaluatePtrUse(Store, Store->getValueOperand()); 4394 } 4395 } 4396 for (auto *I : ScalarPtrs) 4397 if (!PossibleNonScalarPtrs.count(I)) { 4398 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4399 Worklist.insert(I); 4400 } 4401 4402 // Insert the forced scalars. 
4403 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4404 // induction variable when the PHI user is scalarized. 4405 auto ForcedScalar = ForcedScalars.find(VF); 4406 if (ForcedScalar != ForcedScalars.end()) 4407 for (auto *I : ForcedScalar->second) 4408 Worklist.insert(I); 4409 4410 // Expand the worklist by looking through any bitcasts and getelementptr 4411 // instructions we've already identified as scalar. This is similar to the 4412 // expansion step in collectLoopUniforms(); however, here we're only 4413 // expanding to include additional bitcasts and getelementptr instructions. 4414 unsigned Idx = 0; 4415 while (Idx != Worklist.size()) { 4416 Instruction *Dst = Worklist[Idx++]; 4417 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4418 continue; 4419 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4420 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4421 auto *J = cast<Instruction>(U); 4422 return !TheLoop->contains(J) || Worklist.count(J) || 4423 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4424 isScalarUse(J, Src)); 4425 })) { 4426 Worklist.insert(Src); 4427 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4428 } 4429 } 4430 4431 // An induction variable will remain scalar if all users of the induction 4432 // variable and induction variable update remain scalar. 4433 for (auto &Induction : Legal->getInductionVars()) { 4434 auto *Ind = Induction.first; 4435 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4436 4437 // If tail-folding is applied, the primary induction variable will be used 4438 // to feed a vector compare. 4439 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4440 continue; 4441 4442 // Returns true if \p Indvar is a pointer induction that is used directly by 4443 // load/store instruction \p I. 4444 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4445 Instruction *I) { 4446 return Induction.second.getKind() == 4447 InductionDescriptor::IK_PtrInduction && 4448 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4449 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4450 }; 4451 4452 // Determine if all users of the induction variable are scalar after 4453 // vectorization. 4454 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4455 auto *I = cast<Instruction>(U); 4456 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4457 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4458 }); 4459 if (!ScalarInd) 4460 continue; 4461 4462 // Determine if all users of the induction variable update instruction are 4463 // scalar after vectorization. 4464 auto ScalarIndUpdate = 4465 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4466 auto *I = cast<Instruction>(U); 4467 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4468 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4469 }); 4470 if (!ScalarIndUpdate) 4471 continue; 4472 4473 // The induction variable and its update instruction will remain scalar. 
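  // (Illustrative: an induction %i that only feeds consecutive load/store
  // addresses and its own increment keeps both %i and %i.next scalar; if %i
  // also fed a widened arithmetic instruction, the checks above would have
  // moved on to the next induction instead of reaching this point.)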
4474 Worklist.insert(Ind); 4475 Worklist.insert(IndUpdate); 4476 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4477 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4478 << "\n"); 4479 } 4480 4481 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4482 } 4483 4484 bool LoopVectorizationCostModel::isScalarWithPredication( 4485 Instruction *I, ElementCount VF) const { 4486 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4487 return false; 4488 switch(I->getOpcode()) { 4489 default: 4490 break; 4491 case Instruction::Load: 4492 case Instruction::Store: { 4493 if (!Legal->isMaskRequired(I)) 4494 return false; 4495 auto *Ptr = getLoadStorePointerOperand(I); 4496 auto *Ty = getLoadStoreType(I); 4497 Type *VTy = Ty; 4498 if (VF.isVector()) 4499 VTy = VectorType::get(Ty, VF); 4500 const Align Alignment = getLoadStoreAlignment(I); 4501 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4502 TTI.isLegalMaskedGather(VTy, Alignment)) 4503 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4504 TTI.isLegalMaskedScatter(VTy, Alignment)); 4505 } 4506 case Instruction::UDiv: 4507 case Instruction::SDiv: 4508 case Instruction::SRem: 4509 case Instruction::URem: 4510 return mayDivideByZero(*I); 4511 } 4512 return false; 4513 } 4514 4515 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4516 Instruction *I, ElementCount VF) { 4517 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4518 assert(getWideningDecision(I, VF) == CM_Unknown && 4519 "Decision should not be set yet."); 4520 auto *Group = getInterleavedAccessGroup(I); 4521 assert(Group && "Must have a group."); 4522 4523 // If the instruction's allocated size doesn't equal it's type size, it 4524 // requires padding and will be scalarized. 4525 auto &DL = I->getModule()->getDataLayout(); 4526 auto *ScalarTy = getLoadStoreType(I); 4527 if (hasIrregularType(ScalarTy, DL)) 4528 return false; 4529 4530 // If the group involves a non-integral pointer, we may not be able to 4531 // losslessly cast all values to a common type. 4532 unsigned InterleaveFactor = Group->getFactor(); 4533 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy); 4534 for (unsigned i = 0; i < InterleaveFactor; i++) { 4535 Instruction *Member = Group->getMember(i); 4536 if (!Member) 4537 continue; 4538 auto *MemberTy = getLoadStoreType(Member); 4539 bool MemberNI = DL.isNonIntegralPointerType(MemberTy); 4540 // Don't coerce non-integral pointers to integers or vice versa. 4541 if (MemberNI != ScalarNI) { 4542 // TODO: Consider adding special nullptr value case here 4543 return false; 4544 } else if (MemberNI && ScalarNI && 4545 ScalarTy->getPointerAddressSpace() != 4546 MemberTy->getPointerAddressSpace()) { 4547 return false; 4548 } 4549 } 4550 4551 // Check if masking is required. 4552 // A Group may need masking for one of two reasons: it resides in a block that 4553 // needs predication, or it was decided to use masking to deal with gaps 4554 // (either a gap at the end of a load-access that may result in a speculative 4555 // load, or any gaps in a store-access). 
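  // Illustrative example (not from the source): an interleave group built
  // only from "A[2*i] = x" has one member in a factor-2 group; widening it
  // as a single wide store would also write the gap elements, so such a
  // store access with gaps requires masking.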
4556 bool PredicatedAccessRequiresMasking = 4557 blockNeedsPredicationForAnyReason(I->getParent()) && 4558 Legal->isMaskRequired(I); 4559 bool LoadAccessWithGapsRequiresEpilogMasking = 4560 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4561 !isScalarEpilogueAllowed(); 4562 bool StoreAccessWithGapsRequiresMasking = 4563 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4564 if (!PredicatedAccessRequiresMasking && 4565 !LoadAccessWithGapsRequiresEpilogMasking && 4566 !StoreAccessWithGapsRequiresMasking) 4567 return true; 4568 4569 // If masked interleaving is required, we expect that the user/target had 4570 // enabled it, because otherwise it either wouldn't have been created or 4571 // it should have been invalidated by the CostModel. 4572 assert(useMaskedInterleavedAccesses(TTI) && 4573 "Masked interleave-groups for predicated accesses are not enabled."); 4574 4575 if (Group->isReverse()) 4576 return false; 4577 4578 auto *Ty = getLoadStoreType(I); 4579 const Align Alignment = getLoadStoreAlignment(I); 4580 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4581 : TTI.isLegalMaskedStore(Ty, Alignment); 4582 } 4583 4584 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4585 Instruction *I, ElementCount VF) { 4586 // Get and ensure we have a valid memory instruction. 4587 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4588 4589 auto *Ptr = getLoadStorePointerOperand(I); 4590 auto *ScalarTy = getLoadStoreType(I); 4591 4592 // In order to be widened, the pointer should be consecutive, first of all. 4593 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4594 return false; 4595 4596 // If the instruction is a store located in a predicated block, it will be 4597 // scalarized. 4598 if (isScalarWithPredication(I, VF)) 4599 return false; 4600 4601 // If the instruction's allocated size doesn't equal it's type size, it 4602 // requires padding and will be scalarized. 4603 auto &DL = I->getModule()->getDataLayout(); 4604 if (hasIrregularType(ScalarTy, DL)) 4605 return false; 4606 4607 return true; 4608 } 4609 4610 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4611 // We should not collect Uniforms more than once per VF. Right now, 4612 // this function is called from collectUniformsAndScalars(), which 4613 // already does this check. Collecting Uniforms for VF=1 does not make any 4614 // sense. 4615 4616 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4617 "This function should not be visited twice for the same VF"); 4618 4619 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4620 // not analyze again. Uniforms.count(VF) will return 1. 4621 Uniforms[VF].clear(); 4622 4623 // We now know that the loop is vectorizable! 4624 // Collect instructions inside the loop that will remain uniform after 4625 // vectorization. 4626 4627 // Global values, params and instructions outside of current loop are out of 4628 // scope. 4629 auto isOutOfScope = [&](Value *V) -> bool { 4630 Instruction *I = dyn_cast<Instruction>(V); 4631 return (!I || !TheLoop->contains(I)); 4632 }; 4633 4634 // Worklist containing uniform instructions demanding lane 0. 4635 SetVector<Instruction *> Worklist; 4636 BasicBlock *Latch = TheLoop->getLoopLatch(); 4637 4638 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4639 // that are scalar with predication must not be considered uniform after 4640 // vectorization, because that would create an erroneous replicating region 4641 // where only a single instance out of VF should be formed. 4642 // TODO: optimize such seldom cases if found important, see PR40816. 4643 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4644 if (isOutOfScope(I)) { 4645 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4646 << *I << "\n"); 4647 return; 4648 } 4649 if (isScalarWithPredication(I, VF)) { 4650 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4651 << *I << "\n"); 4652 return; 4653 } 4654 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4655 Worklist.insert(I); 4656 }; 4657 4658 // Start with the conditional branch. If the branch condition is an 4659 // instruction contained in the loop that is only used by the branch, it is 4660 // uniform. 4661 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4662 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4663 addToWorklistIfAllowed(Cmp); 4664 4665 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4666 InstWidening WideningDecision = getWideningDecision(I, VF); 4667 assert(WideningDecision != CM_Unknown && 4668 "Widening decision should be ready at this moment"); 4669 4670 // A uniform memory op is itself uniform. We exclude uniform stores 4671 // here as they demand the last lane, not the first one. 4672 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4673 assert(WideningDecision == CM_Scalarize); 4674 return true; 4675 } 4676 4677 return (WideningDecision == CM_Widen || 4678 WideningDecision == CM_Widen_Reverse || 4679 WideningDecision == CM_Interleave); 4680 }; 4681 4682 4683 // Returns true if Ptr is the pointer operand of a memory access instruction 4684 // I, and I is known to not require scalarization. 4685 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4686 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4687 }; 4688 4689 // Holds a list of values which are known to have at least one uniform use. 4690 // Note that there may be other uses which aren't uniform. A "uniform use" 4691 // here is something which only demands lane 0 of the unrolled iterations; 4692 // it does not imply that all lanes produce the same value (e.g. this is not 4693 // the usual meaning of uniform) 4694 SetVector<Value *> HasUniformUse; 4695 4696 // Scan the loop for instructions which are either a) known to have only 4697 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4698 for (auto *BB : TheLoop->blocks()) 4699 for (auto &I : *BB) { 4700 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4701 switch (II->getIntrinsicID()) { 4702 case Intrinsic::sideeffect: 4703 case Intrinsic::experimental_noalias_scope_decl: 4704 case Intrinsic::assume: 4705 case Intrinsic::lifetime_start: 4706 case Intrinsic::lifetime_end: 4707 if (TheLoop->hasLoopInvariantOperands(&I)) 4708 addToWorklistIfAllowed(&I); 4709 break; 4710 default: 4711 break; 4712 } 4713 } 4714 4715 // ExtractValue instructions must be uniform, because the operands are 4716 // known to be loop-invariant. 
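  // (Illustrative: for "%lo = extractvalue { i64, i64 } %p, 0" with %p
  // defined outside the loop, every lane yields the same value, so only
  // lane 0 is ever needed.)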
4717 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4718 assert(isOutOfScope(EVI->getAggregateOperand()) && 4719 "Expected aggregate value to be loop invariant"); 4720 addToWorklistIfAllowed(EVI); 4721 continue; 4722 } 4723 4724 // If there's no pointer operand, there's nothing to do. 4725 auto *Ptr = getLoadStorePointerOperand(&I); 4726 if (!Ptr) 4727 continue; 4728 4729 // A uniform memory op is itself uniform. We exclude uniform stores 4730 // here as they demand the last lane, not the first one. 4731 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4732 addToWorklistIfAllowed(&I); 4733 4734 if (isUniformDecision(&I, VF)) { 4735 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4736 HasUniformUse.insert(Ptr); 4737 } 4738 } 4739 4740 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4741 // demanding) users. Since loops are assumed to be in LCSSA form, this 4742 // disallows uses outside the loop as well. 4743 for (auto *V : HasUniformUse) { 4744 if (isOutOfScope(V)) 4745 continue; 4746 auto *I = cast<Instruction>(V); 4747 auto UsersAreMemAccesses = 4748 llvm::all_of(I->users(), [&](User *U) -> bool { 4749 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4750 }); 4751 if (UsersAreMemAccesses) 4752 addToWorklistIfAllowed(I); 4753 } 4754 4755 // Expand Worklist in topological order: whenever a new instruction 4756 // is added , its users should be already inside Worklist. It ensures 4757 // a uniform instruction will only be used by uniform instructions. 4758 unsigned idx = 0; 4759 while (idx != Worklist.size()) { 4760 Instruction *I = Worklist[idx++]; 4761 4762 for (auto OV : I->operand_values()) { 4763 // isOutOfScope operands cannot be uniform instructions. 4764 if (isOutOfScope(OV)) 4765 continue; 4766 // First order recurrence Phi's should typically be considered 4767 // non-uniform. 4768 auto *OP = dyn_cast<PHINode>(OV); 4769 if (OP && Legal->isFirstOrderRecurrence(OP)) 4770 continue; 4771 // If all the users of the operand are uniform, then add the 4772 // operand into the uniform worklist. 4773 auto *OI = cast<Instruction>(OV); 4774 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4775 auto *J = cast<Instruction>(U); 4776 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4777 })) 4778 addToWorklistIfAllowed(OI); 4779 } 4780 } 4781 4782 // For an instruction to be added into Worklist above, all its users inside 4783 // the loop should also be in Worklist. However, this condition cannot be 4784 // true for phi nodes that form a cyclic dependence. We must process phi 4785 // nodes separately. An induction variable will remain uniform if all users 4786 // of the induction variable and induction variable update remain uniform. 4787 // The code below handles both pointer and non-pointer induction variables. 4788 for (auto &Induction : Legal->getInductionVars()) { 4789 auto *Ind = Induction.first; 4790 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4791 4792 // Determine if all users of the induction variable are uniform after 4793 // vectorization. 4794 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4795 auto *I = cast<Instruction>(U); 4796 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4797 isVectorizedMemAccessUse(I, Ind); 4798 }); 4799 if (!UniformInd) 4800 continue; 4801 4802 // Determine if all users of the induction variable update instruction are 4803 // uniform after vectorization. 
4804 auto UniformIndUpdate = 4805 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4806 auto *I = cast<Instruction>(U); 4807 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4808 isVectorizedMemAccessUse(I, IndUpdate); 4809 }); 4810 if (!UniformIndUpdate) 4811 continue; 4812 4813 // The induction variable and its update instruction will remain uniform. 4814 addToWorklistIfAllowed(Ind); 4815 addToWorklistIfAllowed(IndUpdate); 4816 } 4817 4818 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4819 } 4820 4821 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4822 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4823 4824 if (Legal->getRuntimePointerChecking()->Need) { 4825 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4826 "runtime pointer checks needed. Enable vectorization of this " 4827 "loop with '#pragma clang loop vectorize(enable)' when " 4828 "compiling with -Os/-Oz", 4829 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4830 return true; 4831 } 4832 4833 if (!PSE.getPredicate().isAlwaysTrue()) { 4834 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4835 "runtime SCEV checks needed. Enable vectorization of this " 4836 "loop with '#pragma clang loop vectorize(enable)' when " 4837 "compiling with -Os/-Oz", 4838 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4839 return true; 4840 } 4841 4842 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4843 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4844 reportVectorizationFailure("Runtime stride check for small trip count", 4845 "runtime stride == 1 checks needed. Enable vectorization of " 4846 "this loop without such check by compiling with -Os/-Oz", 4847 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4848 return true; 4849 } 4850 4851 return false; 4852 } 4853 4854 ElementCount 4855 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4856 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4857 return ElementCount::getScalable(0); 4858 4859 if (Hints->isScalableVectorizationDisabled()) { 4860 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4861 "ScalableVectorizationDisabled", ORE, TheLoop); 4862 return ElementCount::getScalable(0); 4863 } 4864 4865 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4866 4867 auto MaxScalableVF = ElementCount::getScalable( 4868 std::numeric_limits<ElementCount::ScalarTy>::max()); 4869 4870 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4871 // FIXME: While for scalable vectors this is currently sufficient, this should 4872 // be replaced by a more detailed mechanism that filters out specific VFs, 4873 // instead of invalidating vectorization for a whole set of VFs based on the 4874 // MaxVF. 4875 4876 // Disable scalable vectorization if the loop contains unsupported reductions. 4877 if (!canVectorizeReductions(MaxScalableVF)) { 4878 reportVectorizationInfo( 4879 "Scalable vectorization not supported for the reduction " 4880 "operations found in this loop.", 4881 "ScalableVFUnfeasible", ORE, TheLoop); 4882 return ElementCount::getScalable(0); 4883 } 4884 4885 // Disable scalable vectorization if the loop contains any instructions 4886 // with element types not supported for scalable vectors. 
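  // (Illustrative, target-dependent example: a target may handle
  // <vscale x 4 x i32> but have no scalable-vector form for an fp128 or
  // x86_fp80 element type; a single such element type in the loop disables
  // scalable VFs here.)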
4887 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4888 return !Ty->isVoidTy() && 4889 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4890 })) { 4891 reportVectorizationInfo("Scalable vectorization is not supported " 4892 "for all element types found in this loop.", 4893 "ScalableVFUnfeasible", ORE, TheLoop); 4894 return ElementCount::getScalable(0); 4895 } 4896 4897 if (Legal->isSafeForAnyVectorWidth()) 4898 return MaxScalableVF; 4899 4900 // Limit MaxScalableVF by the maximum safe dependence distance. 4901 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4902 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4903 MaxVScale = 4904 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4905 MaxScalableVF = ElementCount::getScalable( 4906 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 4907 if (!MaxScalableVF) 4908 reportVectorizationInfo( 4909 "Max legal vector width too small, scalable vectorization " 4910 "unfeasible.", 4911 "ScalableVFUnfeasible", ORE, TheLoop); 4912 4913 return MaxScalableVF; 4914 } 4915 4916 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4917 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4918 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4919 unsigned SmallestType, WidestType; 4920 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4921 4922 // Get the maximum safe dependence distance in bits computed by LAA. 4923 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4924 // the memory accesses that is most restrictive (involved in the smallest 4925 // dependence distance). 4926 unsigned MaxSafeElements = 4927 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4928 4929 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4930 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4931 4932 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4933 << ".\n"); 4934 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4935 << ".\n"); 4936 4937 // First analyze the UserVF, fall back if the UserVF should be ignored. 4938 if (UserVF) { 4939 auto MaxSafeUserVF = 4940 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4941 4942 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4943 // If `VF=vscale x N` is safe, then so is `VF=N` 4944 if (UserVF.isScalable()) 4945 return FixedScalableVFPair( 4946 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4947 else 4948 return UserVF; 4949 } 4950 4951 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4952 4953 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4954 // is better to ignore the hint and let the compiler choose a suitable VF. 
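  // (Illustrative: a fixed hint such as "#pragma clang loop
  // vectorize_width(8)" with a max safe fixed VF of 4 is clamped to 4 below,
  // whereas an unsafe scalable hint is dropped so the compiler can choose.)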
4955 if (!UserVF.isScalable()) { 4956 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4957 << " is unsafe, clamping to max safe VF=" 4958 << MaxSafeFixedVF << ".\n"); 4959 ORE->emit([&]() { 4960 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4961 TheLoop->getStartLoc(), 4962 TheLoop->getHeader()) 4963 << "User-specified vectorization factor " 4964 << ore::NV("UserVectorizationFactor", UserVF) 4965 << " is unsafe, clamping to maximum safe vectorization factor " 4966 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 4967 }); 4968 return MaxSafeFixedVF; 4969 } 4970 4971 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 4972 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4973 << " is ignored because scalable vectors are not " 4974 "available.\n"); 4975 ORE->emit([&]() { 4976 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4977 TheLoop->getStartLoc(), 4978 TheLoop->getHeader()) 4979 << "User-specified vectorization factor " 4980 << ore::NV("UserVectorizationFactor", UserVF) 4981 << " is ignored because the target does not support scalable " 4982 "vectors. The compiler will pick a more suitable value."; 4983 }); 4984 } else { 4985 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4986 << " is unsafe. Ignoring scalable UserVF.\n"); 4987 ORE->emit([&]() { 4988 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4989 TheLoop->getStartLoc(), 4990 TheLoop->getHeader()) 4991 << "User-specified vectorization factor " 4992 << ore::NV("UserVectorizationFactor", UserVF) 4993 << " is unsafe. Ignoring the hint to let the compiler pick a " 4994 "more suitable value."; 4995 }); 4996 } 4997 } 4998 4999 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5000 << " / " << WidestType << " bits.\n"); 5001 5002 FixedScalableVFPair Result(ElementCount::getFixed(1), 5003 ElementCount::getScalable(0)); 5004 if (auto MaxVF = 5005 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5006 MaxSafeFixedVF, FoldTailByMasking)) 5007 Result.FixedVF = MaxVF; 5008 5009 if (auto MaxVF = 5010 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5011 MaxSafeScalableVF, FoldTailByMasking)) 5012 if (MaxVF.isScalable()) { 5013 Result.ScalableVF = MaxVF; 5014 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5015 << "\n"); 5016 } 5017 5018 return Result; 5019 } 5020 5021 FixedScalableVFPair 5022 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5023 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5024 // TODO: It may by useful to do since it's still likely to be dynamically 5025 // uniform if the target can skip. 5026 reportVectorizationFailure( 5027 "Not inserting runtime ptr check for divergent target", 5028 "runtime pointer checks needed. 
Not enabled for divergent target",
5029         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5030     return FixedScalableVFPair::getNone();
5031   }
5032
5033   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5034   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5035   if (TC == 1) {
5036     reportVectorizationFailure("Single iteration (non) loop",
5037         "loop trip count is one, irrelevant for vectorization",
5038         "SingleIterationLoop", ORE, TheLoop);
5039     return FixedScalableVFPair::getNone();
5040   }
5041
5042   switch (ScalarEpilogueStatus) {
5043   case CM_ScalarEpilogueAllowed:
5044     return computeFeasibleMaxVF(TC, UserVF, false);
5045   case CM_ScalarEpilogueNotAllowedUsePredicate:
5046     LLVM_FALLTHROUGH;
5047   case CM_ScalarEpilogueNotNeededUsePredicate:
5048     LLVM_DEBUG(
5049         dbgs() << "LV: vector predicate hint/switch found.\n"
5050                << "LV: Not allowing scalar epilogue, creating predicated "
5051                << "vector loop.\n");
5052     break;
5053   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5054     // fallthrough as a special case of OptForSize
5055   case CM_ScalarEpilogueNotAllowedOptSize:
5056     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5057       LLVM_DEBUG(
5058           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5059     else
5060       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5061                         << "count.\n");
5062
5063     // Bail if runtime checks are required, which are not good when optimising
5064     // for size.
5065     if (runtimeChecksRequired())
5066       return FixedScalableVFPair::getNone();
5067
5068     break;
5069   }
5070
5071   // The only loops we can vectorize without a scalar epilogue are loops with
5072   // a bottom-test and a single exiting block. We'd have to handle the fact
5073   // that not every instruction executes on the last iteration. This will
5074   // require a lane mask which varies through the vector loop body. (TODO)
5075   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5076     // If there was a tail-folding hint/switch, but we can't fold the tail by
5077     // masking, fall back to vectorization with a scalar epilogue.
5078     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5079       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5080                            "scalar epilogue instead.\n");
5081       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5082       return computeFeasibleMaxVF(TC, UserVF, false);
5083     }
5084     return FixedScalableVFPair::getNone();
5085   }
5086
5087   // Now try tail folding.
5088
5089   // Invalidate interleave groups that require an epilogue if we can't mask
5090   // the interleave-group.
5091   if (!useMaskedInterleavedAccesses(TTI)) {
5092     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5093            "No decisions should have been taken at this point");
5094     // Note: There is no need to invalidate any cost modeling decisions here, as
5095     // none were taken so far.
5096     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5097   }
5098
5099   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5100   // Avoid tail folding if the trip count is known to be a multiple of any VF
5101   // we chose.
5102   // FIXME: The condition below pessimises the case for fixed-width vectors,
5103   // when scalable VFs are also candidates for vectorization.
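  // (Illustrative: with a known trip count of 64, no interleaving hint, a
  // max fixed VF of 8 and no scalable candidate, the remainder computed
  // below is zero, so tail folding is skipped and MaxFactors is returned
  // unchanged.)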
5104 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5105 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5106 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5107 "MaxFixedVF must be a power of 2"); 5108 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5109 : MaxFixedVF.getFixedValue(); 5110 ScalarEvolution *SE = PSE.getSE(); 5111 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5112 const SCEV *ExitCount = SE->getAddExpr( 5113 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5114 const SCEV *Rem = SE->getURemExpr( 5115 SE->applyLoopGuards(ExitCount, TheLoop), 5116 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5117 if (Rem->isZero()) { 5118 // Accept MaxFixedVF if we do not have a tail. 5119 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5120 return MaxFactors; 5121 } 5122 } 5123 5124 // If we don't know the precise trip count, or if the trip count that we 5125 // found modulo the vectorization factor is not zero, try to fold the tail 5126 // by masking. 5127 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5128 if (Legal->prepareToFoldTailByMasking()) { 5129 FoldTailByMasking = true; 5130 return MaxFactors; 5131 } 5132 5133 // If there was a tail-folding hint/switch, but we can't fold the tail by 5134 // masking, fallback to a vectorization with a scalar epilogue. 5135 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5136 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5137 "scalar epilogue instead.\n"); 5138 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5139 return MaxFactors; 5140 } 5141 5142 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5143 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5144 return FixedScalableVFPair::getNone(); 5145 } 5146 5147 if (TC == 0) { 5148 reportVectorizationFailure( 5149 "Unable to calculate the loop count due to complex control flow", 5150 "unable to calculate the loop count due to complex control flow", 5151 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5152 return FixedScalableVFPair::getNone(); 5153 } 5154 5155 reportVectorizationFailure( 5156 "Cannot optimize for size and vectorize at the same time.", 5157 "cannot optimize for size and vectorize at the same time. " 5158 "Enable vectorization of this loop with '#pragma clang loop " 5159 "vectorize(enable)' when compiling with -Os/-Oz", 5160 "NoTailLoopWithOptForSize", ORE, TheLoop); 5161 return FixedScalableVFPair::getNone(); 5162 } 5163 5164 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5165 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5166 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5167 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5168 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5169 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5170 : TargetTransformInfo::RGK_FixedWidthVector); 5171 5172 // Convenience function to return the minimum of two ElementCounts. 5173 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5174 assert((LHS.isScalable() == RHS.isScalable()) && 5175 "Scalable flags must match"); 5176 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5177 }; 5178 5179 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5180 // Note that both WidestRegister and WidestType may not be a powers of 2. 
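  // (Illustrative: a 384-bit register and a widest element type of 64 bits
  // give 6 lanes, which PowerOf2Floor reduces to a 4-element candidate.)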
5181 auto MaxVectorElementCount = ElementCount::get( 5182 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5183 ComputeScalableMaxVF); 5184 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5185 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5186 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5187 5188 if (!MaxVectorElementCount) { 5189 LLVM_DEBUG(dbgs() << "LV: The target has no " 5190 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5191 << " vector registers.\n"); 5192 return ElementCount::getFixed(1); 5193 } 5194 5195 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5196 if (ConstTripCount && 5197 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5198 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5199 // If loop trip count (TC) is known at compile time there is no point in 5200 // choosing VF greater than TC (as done in the loop below). Select maximum 5201 // power of two which doesn't exceed TC. 5202 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5203 // when the TC is less than or equal to the known number of lanes. 5204 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5205 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5206 "exceeding the constant trip count: " 5207 << ClampedConstTripCount << "\n"); 5208 return ElementCount::getFixed(ClampedConstTripCount); 5209 } 5210 5211 TargetTransformInfo::RegisterKind RegKind = 5212 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5213 : TargetTransformInfo::RGK_FixedWidthVector; 5214 ElementCount MaxVF = MaxVectorElementCount; 5215 if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 && 5216 TTI.shouldMaximizeVectorBandwidth(RegKind))) { 5217 auto MaxVectorElementCountMaxBW = ElementCount::get( 5218 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5219 ComputeScalableMaxVF); 5220 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5221 5222 // Collect all viable vectorization factors larger than the default MaxVF 5223 // (i.e. MaxVectorElementCount). 5224 SmallVector<ElementCount, 8> VFs; 5225 for (ElementCount VS = MaxVectorElementCount * 2; 5226 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5227 VFs.push_back(VS); 5228 5229 // For each VF calculate its register usage. 5230 auto RUs = calculateRegisterUsage(VFs); 5231 5232 // Select the largest VF which doesn't require more registers than existing 5233 // ones. 5234 for (int i = RUs.size() - 1; i >= 0; --i) { 5235 bool Selected = true; 5236 for (auto &pair : RUs[i].MaxLocalUsers) { 5237 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5238 if (pair.second > TargetNumRegisters) 5239 Selected = false; 5240 } 5241 if (Selected) { 5242 MaxVF = VFs[i]; 5243 break; 5244 } 5245 } 5246 if (ElementCount MinVF = 5247 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5248 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5249 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5250 << ") with target's minimum: " << MinVF << '\n'); 5251 MaxVF = MinVF; 5252 } 5253 } 5254 5255 // Invalidate any widening decisions we might have made, in case the loop 5256 // requires prediction (decided later), but we have already made some 5257 // load/store widening decisions. 
5258 invalidateCostModelingDecisions(); 5259 } 5260 return MaxVF; 5261 } 5262 5263 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5264 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5265 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5266 auto Min = Attr.getVScaleRangeMin(); 5267 auto Max = Attr.getVScaleRangeMax(); 5268 if (Max && Min == Max) 5269 return Max; 5270 } 5271 5272 return TTI.getVScaleForTuning(); 5273 } 5274 5275 bool LoopVectorizationCostModel::isMoreProfitable( 5276 const VectorizationFactor &A, const VectorizationFactor &B) const { 5277 InstructionCost CostA = A.Cost; 5278 InstructionCost CostB = B.Cost; 5279 5280 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5281 5282 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5283 MaxTripCount) { 5284 // If we are folding the tail and the trip count is a known (possibly small) 5285 // constant, the trip count will be rounded up to an integer number of 5286 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5287 // which we compare directly. When not folding the tail, the total cost will 5288 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5289 // approximated with the per-lane cost below instead of using the tripcount 5290 // as here. 5291 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5292 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5293 return RTCostA < RTCostB; 5294 } 5295 5296 // Improve estimate for the vector width if it is scalable. 5297 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5298 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5299 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5300 if (A.Width.isScalable()) 5301 EstimatedWidthA *= VScale.getValue(); 5302 if (B.Width.isScalable()) 5303 EstimatedWidthB *= VScale.getValue(); 5304 } 5305 5306 // Assume vscale may be larger than 1 (or the value being tuned for), 5307 // so that scalable vectorization is slightly favorable over fixed-width 5308 // vectorization. 5309 if (A.Width.isScalable() && !B.Width.isScalable()) 5310 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5311 5312 // To avoid the need for FP division: 5313 // (CostA / A.Width) < (CostB / B.Width) 5314 // <=> (CostA * B.Width) < (CostB * A.Width) 5315 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5316 } 5317 5318 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5319 const ElementCountSet &VFCandidates) { 5320 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5321 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5322 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5323 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5324 "Expected Scalar VF to be a candidate"); 5325 5326 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5327 VectorizationFactor ChosenFactor = ScalarCost; 5328 5329 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5330 if (ForceVectorization && VFCandidates.size() > 1) { 5331 // Ignore scalar width, because the user explicitly wants vectorization. 5332 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5333 // evaluation. 
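  // (Illustrative: with the scalar baseline priced at the maximum cost, a
  // vector candidate whose estimated cost is worse than the real scalar cost
  // still compares as more profitable below, so forcing vectorization can
  // select a VF that the cost model alone would have rejected.)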
5334 ChosenFactor.Cost = InstructionCost::getMax(); 5335 } 5336 5337 SmallVector<InstructionVFPair> InvalidCosts; 5338 for (const auto &i : VFCandidates) { 5339 // The cost for scalar VF=1 is already calculated, so ignore it. 5340 if (i.isScalar()) 5341 continue; 5342 5343 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5344 VectorizationFactor Candidate(i, C.first); 5345 5346 #ifndef NDEBUG 5347 unsigned AssumedMinimumVscale = 1; 5348 if (Optional<unsigned> VScale = getVScaleForTuning()) 5349 AssumedMinimumVscale = VScale.getValue(); 5350 unsigned Width = 5351 Candidate.Width.isScalable() 5352 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5353 : Candidate.Width.getFixedValue(); 5354 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5355 << " costs: " << (Candidate.Cost / Width)); 5356 if (i.isScalable()) 5357 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5358 << AssumedMinimumVscale << ")"); 5359 LLVM_DEBUG(dbgs() << ".\n"); 5360 #endif 5361 5362 if (!C.second && !ForceVectorization) { 5363 LLVM_DEBUG( 5364 dbgs() << "LV: Not considering vector loop of width " << i 5365 << " because it will not generate any vector instructions.\n"); 5366 continue; 5367 } 5368 5369 // If profitable add it to ProfitableVF list. 5370 if (isMoreProfitable(Candidate, ScalarCost)) 5371 ProfitableVFs.push_back(Candidate); 5372 5373 if (isMoreProfitable(Candidate, ChosenFactor)) 5374 ChosenFactor = Candidate; 5375 } 5376 5377 // Emit a report of VFs with invalid costs in the loop. 5378 if (!InvalidCosts.empty()) { 5379 // Group the remarks per instruction, keeping the instruction order from 5380 // InvalidCosts. 5381 std::map<Instruction *, unsigned> Numbering; 5382 unsigned I = 0; 5383 for (auto &Pair : InvalidCosts) 5384 if (!Numbering.count(Pair.first)) 5385 Numbering[Pair.first] = I++; 5386 5387 // Sort the list, first on instruction(number) then on VF. 5388 llvm::sort(InvalidCosts, 5389 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5390 if (Numbering[A.first] != Numbering[B.first]) 5391 return Numbering[A.first] < Numbering[B.first]; 5392 ElementCountComparator ECC; 5393 return ECC(A.second, B.second); 5394 }); 5395 5396 // For a list of ordered instruction-vf pairs: 5397 // [(load, vf1), (load, vf2), (store, vf1)] 5398 // Group the instructions together to emit separate remarks for: 5399 // load (vf1, vf2) 5400 // store (vf1) 5401 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5402 auto Subset = ArrayRef<InstructionVFPair>(); 5403 do { 5404 if (Subset.empty()) 5405 Subset = Tail.take_front(1); 5406 5407 Instruction *I = Subset.front().first; 5408 5409 // If the next instruction is different, or if there are no other pairs, 5410 // emit a remark for the collated subset. e.g. 5411 // [(load, vf1), (load, vf2))] 5412 // to emit: 5413 // remark: invalid costs for 'load' at VF=(vf, vf2) 5414 if (Subset == Tail || Tail[Subset.size()].first != I) { 5415 std::string OutString; 5416 raw_string_ostream OS(OutString); 5417 assert(!Subset.empty() && "Unexpected empty range"); 5418 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5419 for (auto &Pair : Subset) 5420 OS << (Pair.second == Subset.front().second ? 
"" : ", ") 5421 << Pair.second; 5422 OS << "):"; 5423 if (auto *CI = dyn_cast<CallInst>(I)) 5424 OS << " call to " << CI->getCalledFunction()->getName(); 5425 else 5426 OS << " " << I->getOpcodeName(); 5427 OS.flush(); 5428 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5429 Tail = Tail.drop_front(Subset.size()); 5430 Subset = {}; 5431 } else 5432 // Grow the subset by one element 5433 Subset = Tail.take_front(Subset.size() + 1); 5434 } while (!Tail.empty()); 5435 } 5436 5437 if (!EnableCondStoresVectorization && NumPredStores) { 5438 reportVectorizationFailure("There are conditional stores.", 5439 "store that is conditionally executed prevents vectorization", 5440 "ConditionalStore", ORE, TheLoop); 5441 ChosenFactor = ScalarCost; 5442 } 5443 5444 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5445 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5446 << "LV: Vectorization seems to be not beneficial, " 5447 << "but was forced by a user.\n"); 5448 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5449 return ChosenFactor; 5450 } 5451 5452 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5453 const Loop &L, ElementCount VF) const { 5454 // Cross iteration phis such as reductions need special handling and are 5455 // currently unsupported. 5456 if (any_of(L.getHeader()->phis(), 5457 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5458 return false; 5459 5460 // Phis with uses outside of the loop require special handling and are 5461 // currently unsupported. 5462 for (auto &Entry : Legal->getInductionVars()) { 5463 // Look for uses of the value of the induction at the last iteration. 5464 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5465 for (User *U : PostInc->users()) 5466 if (!L.contains(cast<Instruction>(U))) 5467 return false; 5468 // Look for uses of penultimate value of the induction. 5469 for (User *U : Entry.first->users()) 5470 if (!L.contains(cast<Instruction>(U))) 5471 return false; 5472 } 5473 5474 // Induction variables that are widened require special handling that is 5475 // currently not supported. 5476 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5477 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5478 this->isProfitableToScalarize(Entry.first, VF)); 5479 })) 5480 return false; 5481 5482 // Epilogue vectorization code has not been auditted to ensure it handles 5483 // non-latch exits properly. It may be fine, but it needs auditted and 5484 // tested. 5485 if (L.getExitingBlock() != L.getLoopLatch()) 5486 return false; 5487 5488 return true; 5489 } 5490 5491 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5492 const ElementCount VF) const { 5493 // FIXME: We need a much better cost-model to take different parameters such 5494 // as register pressure, code size increase and cost of extra branches into 5495 // account. For now we apply a very crude heuristic and only consider loops 5496 // with vectorization factors larger than a certain value. 5497 // We also consider epilogue vectorization unprofitable for targets that don't 5498 // consider interleaving beneficial (eg. MVE). 5499 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5500 return false; 5501 // FIXME: We should consider changing the threshold for scalable 5502 // vectors to take VScaleForTuning into account. 
5503 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF) 5504 return true; 5505 return false; 5506 } 5507 5508 VectorizationFactor 5509 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5510 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5511 VectorizationFactor Result = VectorizationFactor::Disabled(); 5512 if (!EnableEpilogueVectorization) { 5513 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5514 return Result; 5515 } 5516 5517 if (!isScalarEpilogueAllowed()) { 5518 LLVM_DEBUG( 5519 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5520 "allowed.\n";); 5521 return Result; 5522 } 5523 5524 // Not really a cost consideration, but check for unsupported cases here to 5525 // simplify the logic. 5526 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5527 LLVM_DEBUG( 5528 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5529 "not a supported candidate.\n";); 5530 return Result; 5531 } 5532 5533 if (EpilogueVectorizationForceVF > 1) { 5534 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5535 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5536 if (LVP.hasPlanWithVF(ForcedEC)) 5537 return {ForcedEC, 0}; 5538 else { 5539 LLVM_DEBUG( 5540 dbgs() 5541 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5542 return Result; 5543 } 5544 } 5545 5546 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5547 TheLoop->getHeader()->getParent()->hasMinSize()) { 5548 LLVM_DEBUG( 5549 dbgs() 5550 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5551 return Result; 5552 } 5553 5554 if (!isEpilogueVectorizationProfitable(MainLoopVF)) { 5555 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 5556 "this loop\n"); 5557 return Result; 5558 } 5559 5560 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know 5561 // the main loop handles 8 lanes per iteration. We could still benefit from 5562 // vectorizing the epilogue loop with VF=4. 5563 ElementCount EstimatedRuntimeVF = MainLoopVF; 5564 if (MainLoopVF.isScalable()) { 5565 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5566 if (Optional<unsigned> VScale = getVScaleForTuning()) 5567 EstimatedRuntimeVF *= VScale.getValue(); 5568 } 5569 5570 for (auto &NextVF : ProfitableVFs) 5571 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5572 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5573 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5574 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5575 LVP.hasPlanWithVF(NextVF.Width)) 5576 Result = NextVF; 5577 5578 if (Result != VectorizationFactor::Disabled()) 5579 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5580 << Result.Width << "\n";); 5581 return Result; 5582 } 5583 5584 std::pair<unsigned, unsigned> 5585 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5586 unsigned MinWidth = -1U; 5587 unsigned MaxWidth = 8; 5588 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5589 // For in-loop reductions, no element types are added to ElementTypesInLoop 5590 // if there are no loads/stores in the loop. In this case, check through the 5591 // reduction variables to determine the maximum width. 
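// E.g. (illustrative): a loop whose only in-loop work is an i16 add reduction
// fed through i8 casts has no loads/stores to inspect, so the width comes from
// the recurrence below: min(8, 16) = 8 bits.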
5592 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5593 // Reset MaxWidth so that we can find the smallest type used by recurrences 5594 // in the loop. 5595 MaxWidth = -1U; 5596 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5597 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5598 // When finding the min width used by the recurrence we need to account 5599 // for casts on the input operands of the recurrence. 5600 MaxWidth = std::min<unsigned>( 5601 MaxWidth, std::min<unsigned>( 5602 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5603 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5604 } 5605 } else { 5606 for (Type *T : ElementTypesInLoop) { 5607 MinWidth = std::min<unsigned>( 5608 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5609 MaxWidth = std::max<unsigned>( 5610 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5611 } 5612 } 5613 return {MinWidth, MaxWidth}; 5614 } 5615 5616 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5617 ElementTypesInLoop.clear(); 5618 // For each block. 5619 for (BasicBlock *BB : TheLoop->blocks()) { 5620 // For each instruction in the loop. 5621 for (Instruction &I : BB->instructionsWithoutDebug()) { 5622 Type *T = I.getType(); 5623 5624 // Skip ignored values. 5625 if (ValuesToIgnore.count(&I)) 5626 continue; 5627 5628 // Only examine Loads, Stores and PHINodes. 5629 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5630 continue; 5631 5632 // Examine PHI nodes that are reduction variables. Update the type to 5633 // account for the recurrence type. 5634 if (auto *PN = dyn_cast<PHINode>(&I)) { 5635 if (!Legal->isReductionVariable(PN)) 5636 continue; 5637 const RecurrenceDescriptor &RdxDesc = 5638 Legal->getReductionVars().find(PN)->second; 5639 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5640 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5641 RdxDesc.getRecurrenceType(), 5642 TargetTransformInfo::ReductionFlags())) 5643 continue; 5644 T = RdxDesc.getRecurrenceType(); 5645 } 5646 5647 // Examine the stored values. 5648 if (auto *ST = dyn_cast<StoreInst>(&I)) 5649 T = ST->getValueOperand()->getType(); 5650 5651 assert(T->isSized() && 5652 "Expected the load/store/recurrence type to be sized"); 5653 5654 ElementTypesInLoop.insert(T); 5655 } 5656 } 5657 } 5658 5659 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5660 unsigned LoopCost) { 5661 // -- The interleave heuristics -- 5662 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5663 // There are many micro-architectural considerations that we can't predict 5664 // at this level. For example, frontend pressure (on decode or fetch) due to 5665 // code size, or the number and capabilities of the execution ports. 5666 // 5667 // We use the following heuristics to select the interleave count: 5668 // 1. If the code has reductions, then we interleave to break the cross 5669 // iteration dependency. 5670 // 2. If the loop is really small, then we interleave to reduce the loop 5671 // overhead. 5672 // 3. We don't interleave if we think that we will spill registers to memory 5673 // due to the increased register pressure. 5674 5675 if (!isScalarEpilogueAllowed()) 5676 return 1; 5677 5678 // We used the distance for the interleave count. 
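// (A finite maximum safe dependence distance already constrains how many
// iterations may execute together; interleaving on top of VF would widen the
// combined access footprint further, so we conservatively do not interleave.)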
5679 if (Legal->getMaxSafeDepDistBytes() != -1U) 5680 return 1; 5681 5682 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5683 const bool HasReductions = !Legal->getReductionVars().empty(); 5684 // Do not interleave loops with a relatively small known or estimated trip 5685 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 5686 // enabled, and the code has scalar reductions (HasReductions && VF == 1), 5687 // because with the above conditions interleaving can expose ILP and break 5688 // cross iteration dependences for reductions. 5689 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5690 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5691 return 1; 5692 5693 // If we did not calculate the cost for VF (because the user selected the VF) 5694 // then we calculate the cost of VF here. 5695 if (LoopCost == 0) { 5696 InstructionCost C = expectedCost(VF).first; 5697 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5698 LoopCost = *C.getValue(); 5699 5700 // Loop body is free and there is no need for interleaving. 5701 if (LoopCost == 0) 5702 return 1; 5703 } 5704 5705 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5706 // We divide by these constants so assume that we have at least one 5707 // instruction that uses at least one register. 5708 for (auto& pair : R.MaxLocalUsers) { 5709 pair.second = std::max(pair.second, 1U); 5710 } 5711 5712 // We calculate the interleave count using the following formula. 5713 // Subtract the number of loop invariants from the number of available 5714 // registers. These registers are used by all of the interleaved instances. 5715 // Next, divide the remaining registers by the number of registers that is 5716 // required by the loop, in order to estimate how many parallel instances 5717 // fit without causing spills. All of this is rounded down if necessary to be 5718 // a power of two. We want a power-of-two interleave count to simplify any 5719 // addressing operations or alignment considerations. 5720 // We also want power-of-two interleave counts to ensure that the induction 5721 // variable of the vector loop wraps to zero, when tail is folded by masking; 5722 // this currently happens when OptForSize, in which case IC is set to 1 above. 5723 unsigned IC = UINT_MAX; 5724 5725 for (auto& pair : R.MaxLocalUsers) { 5726 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5727 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5728 << " registers of " 5729 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5730 if (VF.isScalar()) { 5731 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5732 TargetNumRegisters = ForceTargetNumScalarRegs; 5733 } else { 5734 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5735 TargetNumRegisters = ForceTargetNumVectorRegs; 5736 } 5737 unsigned MaxLocalUsers = pair.second; 5738 unsigned LoopInvariantRegs = 0; 5739 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5740 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5741 5742 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5743 // Don't count the induction variable as interleaved. 5744 if (EnableIndVarRegisterHeur) { 5745 TmpIC = 5746 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5747 std::max(1U, (MaxLocalUsers - 1))); 5748 } 5749 5750 IC = std::min(IC, TmpIC); 5751 } 5752 5753 // Clamp the interleave ranges to reasonable counts.
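// Worked example of the register-based estimate above (illustrative numbers):
// with 32 vector registers, 4 loop-invariant values and a peak of 7 local
// users, the induction-variable heuristic gives
// PowerOf2Floor((32 - 4 - 1) / max(1, 7 - 1)) = PowerOf2Floor(4) = 4, so
// IC = 4 before the clamping below.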
5754 unsigned MaxInterleaveCount = 5755 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5756 5757 // Check if the user has overridden the max. 5758 if (VF.isScalar()) { 5759 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5760 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5761 } else { 5762 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5763 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5764 } 5765 5766 // If the trip count is a known or estimated compile-time constant, limit the 5767 // interleave count to be less than the trip count divided by VF, provided it 5768 // is at least 1. 5769 // 5770 // For scalable vectors we can't know if interleaving is beneficial. It may 5771 // not be beneficial for small loops if none of the lanes in the second vector 5772 // iteration is enabled. However, for larger loops, there is likely to be a 5773 // similar benefit as for fixed-width vectors. For now, we choose to leave 5774 // the InterleaveCount as if vscale is '1', although if some information about 5775 // the vector is known (e.g. min vector size), we can make a better decision. 5776 if (BestKnownTC) { 5777 MaxInterleaveCount = 5778 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5779 // Make sure MaxInterleaveCount is greater than 0. 5780 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5781 } 5782 5783 assert(MaxInterleaveCount > 0 && 5784 "Maximum interleave count must be greater than 0"); 5785 5786 // Clamp the calculated IC to be between 1 and the max interleave count 5787 // that the target and trip count allow. 5788 if (IC > MaxInterleaveCount) 5789 IC = MaxInterleaveCount; 5790 else 5791 // Make sure IC is greater than 0. 5792 IC = std::max(1u, IC); 5793 5794 assert(IC > 0 && "Interleave count must be greater than 0."); 5795 5796 // Interleave if we vectorized this loop and there is a reduction that could 5797 // benefit from interleaving. 5798 if (VF.isVector() && HasReductions) { 5799 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5800 return IC; 5801 } 5802 5803 // For any scalar loop that either requires runtime checks or predication we 5804 // are better off leaving this to the unroller. Note that if we've already 5805 // vectorized the loop we will have done the runtime check and so interleaving 5806 // won't require further checks. 5807 bool ScalarInterleavingRequiresPredication = 5808 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5809 return Legal->blockNeedsPredication(BB); 5810 })); 5811 bool ScalarInterleavingRequiresRuntimePointerCheck = 5812 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5813 5814 // We want to interleave small loops in order to reduce the loop overhead and 5815 // potentially expose ILP opportunities. 5816 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5817 << "LV: IC is " << IC << '\n' 5818 << "LV: VF is " << VF << '\n'); 5819 const bool AggressivelyInterleaveReductions = 5820 TTI.enableAggressiveInterleaving(HasReductions); 5821 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5822 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5823 // We assume that the cost overhead is 1 and we use the cost model 5824 // to estimate the cost of the loop and interleave until the cost of the 5825 // loop overhead is about 5% of the cost of the loop.
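// E.g. (illustrative, assuming the default SmallLoopCost threshold of 20):
// a loop body costing 4 gives PowerOf2Floor(20 / 4) = 4, so the computation
// below yields SmallIC = min(IC, 4).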
5826 unsigned SmallIC = 5827 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5828 5829 // Interleave until store/load ports (estimated by max interleave count) are 5830 // saturated. 5831 unsigned NumStores = Legal->getNumStores(); 5832 unsigned NumLoads = Legal->getNumLoads(); 5833 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5834 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5835 5836 // There is little point in interleaving for reductions containing selects 5837 // and compares when VF=1 since it may just create more overhead than it's 5838 // worth for loops with small trip counts. This is because we still have to 5839 // do the final reduction after the loop. 5840 bool HasSelectCmpReductions = 5841 HasReductions && 5842 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5843 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5844 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 5845 RdxDesc.getRecurrenceKind()); 5846 }); 5847 if (HasSelectCmpReductions) { 5848 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 5849 return 1; 5850 } 5851 5852 // If we have a scalar reduction (vector reductions are already dealt with 5853 // by this point), we can increase the critical path length if the loop 5854 // we're interleaving is inside another loop. For tree-wise reductions 5855 // set the limit to 2, and for ordered reductions it's best to disable 5856 // interleaving entirely. 5857 if (HasReductions && TheLoop->getLoopDepth() > 1) { 5858 bool HasOrderedReductions = 5859 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 5860 const RecurrenceDescriptor &RdxDesc = Reduction.second; 5861 return RdxDesc.isOrdered(); 5862 }); 5863 if (HasOrderedReductions) { 5864 LLVM_DEBUG( 5865 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 5866 return 1; 5867 } 5868 5869 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5870 SmallIC = std::min(SmallIC, F); 5871 StoresIC = std::min(StoresIC, F); 5872 LoadsIC = std::min(LoadsIC, F); 5873 } 5874 5875 if (EnableLoadStoreRuntimeInterleave && 5876 std::max(StoresIC, LoadsIC) > SmallIC) { 5877 LLVM_DEBUG( 5878 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5879 return std::max(StoresIC, LoadsIC); 5880 } 5881 5882 // If there are scalar reductions and TTI has enabled aggressive 5883 // interleaving for reductions, we will interleave to expose ILP. 5884 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 5885 AggressivelyInterleaveReductions) { 5886 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5887 // Interleave no less than SmallIC but not as aggressive as the normal IC 5888 // to satisfy the rare situation when resources are too limited. 5889 return std::max(IC / 2, SmallIC); 5890 } else { 5891 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5892 return SmallIC; 5893 } 5894 } 5895 5896 // Interleave if this is a large loop (small loops are already dealt with by 5897 // this point) that could benefit from interleaving. 
5898 if (AggressivelyInterleaveReductions) { 5899 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5900 return IC; 5901 } 5902 5903 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5904 return 1; 5905 } 5906 5907 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5908 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 5909 // This function calculates the register usage by measuring the highest number 5910 // of values that are alive at a single location. Obviously, this is a very 5911 // rough estimation. We scan the loop in topological order and 5912 // assign a number to each instruction. We use RPO to ensure that defs are 5913 // met before their users. We assume that each instruction that has in-loop 5914 // users starts an interval. We record every time that an in-loop value is 5915 // used, so we have a list of the first and last occurrences of each 5916 // instruction. Next, we transpose this data structure into a multi-map that 5917 // holds the list of intervals that *end* at a specific location. This 5918 // multi-map allows us to perform a linear search. We scan the instructions linearly 5919 // and record each time that a new interval starts, by placing it in a set. 5920 // If we find this value in the multi-map then we remove it from the set. 5921 // The max register usage is the maximum size of the set. 5922 // We also search for instructions that are defined outside the loop, but are 5923 // used inside the loop. We need this number separately from the max-interval 5924 // usage number because when we unroll, loop-invariant values do not take 5925 // more registers. 5926 LoopBlocksDFS DFS(TheLoop); 5927 DFS.perform(LI); 5928 5929 RegisterUsage RU; 5930 5931 // Each 'key' in the map opens a new interval. The values 5932 // of the map are the index of the 'last seen' usage of the 5933 // instruction that is the key. 5934 using IntervalMap = DenseMap<Instruction *, unsigned>; 5935 5936 // Maps an instruction index to the instruction itself. 5937 SmallVector<Instruction *, 64> IdxToInstr; 5938 // Marks the end of each interval. 5939 IntervalMap EndPoint; 5940 // Saves the set of instructions that are used in the loop. 5941 SmallPtrSet<Instruction *, 8> Ends; 5942 // Saves the list of values that are used in the loop but are 5943 // defined outside the loop, such as arguments and constants. 5944 SmallPtrSet<Value *, 8> LoopInvariants; 5945 5946 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5947 for (Instruction &I : BB->instructionsWithoutDebug()) { 5948 IdxToInstr.push_back(&I); 5949 5950 // Save the end location of each USE. 5951 for (Value *U : I.operands()) { 5952 auto *Instr = dyn_cast<Instruction>(U); 5953 5954 // Ignore non-instruction values such as arguments, constants, etc. 5955 if (!Instr) 5956 continue; 5957 5958 // If this instruction is outside the loop then record it and continue. 5959 if (!TheLoop->contains(Instr)) { 5960 LoopInvariants.insert(Instr); 5961 continue; 5962 } 5963 5964 // Overwrite previous end points. 5965 EndPoint[Instr] = IdxToInstr.size(); 5966 Ends.insert(Instr); 5967 } 5968 } 5969 } 5970 5971 // Saves the list of intervals that end with the index in 'key'. 5972 using InstrList = SmallVector<Instruction *, 2>; 5973 DenseMap<unsigned, InstrList> TransposeEnds; 5974 5975 // Transpose the EndPoints to a list of values that end at each index.
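// E.g., if EndPoint = {a: 3, b: 3, c: 5} then TransposeEnds = {3: [a, b],
// 5: [c]}: when the linear scan below reaches index 3, the intervals for a
// and b are closed and both leave OpenIntervals.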
5976 for (auto &Interval : EndPoint) 5977 TransposeEnds[Interval.second].push_back(Interval.first); 5978 5979 SmallPtrSet<Instruction *, 8> OpenIntervals; 5980 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5981 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5982 5983 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5984 5985 auto GetRegUsage = [&TTI = TTI](Type *Ty, ElementCount VF) -> unsigned { 5986 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5987 return 0; 5988 return TTI.getRegUsageForType(VectorType::get(Ty, VF)); 5989 }; 5990 5991 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5992 Instruction *I = IdxToInstr[i]; 5993 5994 // Remove all of the instructions that end at this location. 5995 InstrList &List = TransposeEnds[i]; 5996 for (Instruction *ToRemove : List) 5997 OpenIntervals.erase(ToRemove); 5998 5999 // Ignore instructions that are never used within the loop. 6000 if (!Ends.count(I)) 6001 continue; 6002 6003 // Skip ignored values. 6004 if (ValuesToIgnore.count(I)) 6005 continue; 6006 6007 // For each VF find the maximum usage of registers. 6008 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6009 // Count the number of live intervals. 6010 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6011 6012 if (VFs[j].isScalar()) { 6013 for (auto Inst : OpenIntervals) { 6014 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6015 if (RegUsage.find(ClassID) == RegUsage.end()) 6016 RegUsage[ClassID] = 1; 6017 else 6018 RegUsage[ClassID] += 1; 6019 } 6020 } else { 6021 collectUniformsAndScalars(VFs[j]); 6022 for (auto Inst : OpenIntervals) { 6023 // Skip ignored values for VF > 1. 6024 if (VecValuesToIgnore.count(Inst)) 6025 continue; 6026 if (isScalarAfterVectorization(Inst, VFs[j])) { 6027 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6028 if (RegUsage.find(ClassID) == RegUsage.end()) 6029 RegUsage[ClassID] = 1; 6030 else 6031 RegUsage[ClassID] += 1; 6032 } else { 6033 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6034 if (RegUsage.find(ClassID) == RegUsage.end()) 6035 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6036 else 6037 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6038 } 6039 } 6040 } 6041 6042 for (auto& pair : RegUsage) { 6043 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6044 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6045 else 6046 MaxUsages[j][pair.first] = pair.second; 6047 } 6048 } 6049 6050 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6051 << OpenIntervals.size() << '\n'); 6052 6053 // Add the current instruction to the list of open intervals. 6054 OpenIntervals.insert(I); 6055 } 6056 6057 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6058 SmallMapVector<unsigned, unsigned, 4> Invariant; 6059 6060 for (auto Inst : LoopInvariants) { 6061 unsigned Usage = 6062 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6063 unsigned ClassID = 6064 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6065 if (Invariant.find(ClassID) == Invariant.end()) 6066 Invariant[ClassID] = Usage; 6067 else 6068 Invariant[ClassID] += Usage; 6069 } 6070 6071 LLVM_DEBUG({ 6072 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6073 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6074 << " item\n"; 6075 for (const auto &pair : MaxUsages[i]) { 6076 dbgs() << "LV(REG): RegisterClass: " 6077 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6078 << " registers\n"; 6079 } 6080 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6081 << " item\n"; 6082 for (const auto &pair : Invariant) { 6083 dbgs() << "LV(REG): RegisterClass: " 6084 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6085 << " registers\n"; 6086 } 6087 }); 6088 6089 RU.LoopInvariantRegs = Invariant; 6090 RU.MaxLocalUsers = MaxUsages[i]; 6091 RUs[i] = RU; 6092 } 6093 6094 return RUs; 6095 } 6096 6097 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6098 ElementCount VF) { 6099 // TODO: Cost model for emulated masked load/store is completely 6100 // broken. This hack guides the cost model to use an artificially 6101 // high enough value to practically disable vectorization with such 6102 // operations, except where previously deployed legality hack allowed 6103 // using very low cost values. This is to avoid regressions coming simply 6104 // from moving "masked load/store" check from legality to cost model. 6105 // Masked Load/Gather emulation was previously never allowed. 6106 // Emulation of a limited number of Masked Store/Scatter operations was allowed. 6107 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6108 return isa<LoadInst>(I) || 6109 (isa<StoreInst>(I) && 6110 NumPredStores > NumberOfStoresToPredicate); 6111 } 6112 6113 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6114 // If we aren't vectorizing the loop, or if we've already collected the 6115 // instructions to scalarize, there's nothing to do. Collection may already 6116 // have occurred if we have a user-selected VF and are now computing the 6117 // expected cost for interleaving. 6118 if (VF.isScalar() || VF.isZero() || 6119 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6120 return; 6121 6122 // Initialize a mapping for VF in InstsToScalarize. If we find that it's 6123 // not profitable to scalarize any instructions, the presence of VF in the 6124 // map will indicate that we've analyzed it already. 6125 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6126 6127 // Find all the instructions that are scalar with predication in the loop and 6128 // determine if it would be better to not if-convert the blocks they are in. 6129 // If so, we also record the instructions to scalarize. 6130 for (BasicBlock *BB : TheLoop->blocks()) { 6131 if (!blockNeedsPredicationForAnyReason(BB)) 6132 continue; 6133 for (Instruction &I : *BB) 6134 if (isScalarWithPredication(&I, VF)) { 6135 ScalarCostsTy ScalarCosts; 6136 // Do not apply discount if scalable, because that would lead to 6137 // invalid scalarization costs. 6138 // Do not apply discount logic if hacked cost is needed 6139 // for emulated masked memrefs.
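// (computePredInstDiscount, defined below, returns a non-negative discount
// exactly when the sunk scalar form of the chain is expected to be no more
// expensive than its vector form; only then is the chain recorded in
// ScalarCostsVF.)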
6140 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) && 6141 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6142 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6143 // Remember that BB will remain after vectorization. 6144 PredicatedBBsAfterVectorization.insert(BB); 6145 } 6146 } 6147 } 6148 6149 int LoopVectorizationCostModel::computePredInstDiscount( 6150 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6151 assert(!isUniformAfterVectorization(PredInst, VF) && 6152 "Instruction marked uniform-after-vectorization will be predicated"); 6153 6154 // Initialize the discount to zero, meaning that the scalar version and the 6155 // vector version cost the same. 6156 InstructionCost Discount = 0; 6157 6158 // Holds instructions to analyze. The instructions we visit are mapped in 6159 // ScalarCosts. Those instructions are the ones that would be scalarized if 6160 // we find that the scalar version costs less. 6161 SmallVector<Instruction *, 8> Worklist; 6162 6163 // Returns true if the given instruction can be scalarized. 6164 auto canBeScalarized = [&](Instruction *I) -> bool { 6165 // We only attempt to scalarize instructions forming a single-use chain 6166 // from the original predicated block that would otherwise be vectorized. 6167 // Although not strictly necessary, we give up on instructions we know will 6168 // already be scalar to avoid traversing chains that are unlikely to be 6169 // beneficial. 6170 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6171 isScalarAfterVectorization(I, VF)) 6172 return false; 6173 6174 // If the instruction is scalar with predication, it will be analyzed 6175 // separately. We ignore it within the context of PredInst. 6176 if (isScalarWithPredication(I, VF)) 6177 return false; 6178 6179 // If any of the instruction's operands are uniform after vectorization, 6180 // the instruction cannot be scalarized. This prevents, for example, a 6181 // masked load from being scalarized. 6182 // 6183 // We assume we will only emit a value for lane zero of an instruction 6184 // marked uniform after vectorization, rather than VF identical values. 6185 // Thus, if we scalarize an instruction that uses a uniform, we would 6186 // create uses of values corresponding to the lanes we aren't emitting code 6187 // for. This behavior can be changed by allowing getScalarValue to clone 6188 // the lane zero values for uniforms rather than asserting. 6189 for (Use &U : I->operands()) 6190 if (auto *J = dyn_cast<Instruction>(U.get())) 6191 if (isUniformAfterVectorization(J, VF)) 6192 return false; 6193 6194 // Otherwise, we can scalarize the instruction. 6195 return true; 6196 }; 6197 6198 // Compute the expected cost discount from scalarizing the entire expression 6199 // feeding the predicated instruction. We currently only consider expressions 6200 // that are single-use instruction chains. 6201 Worklist.push_back(PredInst); 6202 while (!Worklist.empty()) { 6203 Instruction *I = Worklist.pop_back_val(); 6204 6205 // If we've already analyzed the instruction, there's nothing to do. 6206 if (ScalarCosts.find(I) != ScalarCosts.end()) 6207 continue; 6208 6209 // Compute the cost of the vector instruction. Note that this cost already 6210 // includes the scalarization overhead of the predicated instruction. 6211 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6212 6213 // Compute the cost of the scalarized instruction. 
This cost is the cost of 6214 // the instruction as if it wasn't if-converted and instead remained in the 6215 // predicated block. We will scale this cost by block probability after 6216 // computing the scalarization overhead. 6217 InstructionCost ScalarCost = 6218 VF.getFixedValue() * 6219 getInstructionCost(I, ElementCount::getFixed(1)).first; 6220 6221 // Compute the scalarization overhead of needed insertelement instructions 6222 // and phi nodes. 6223 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6224 ScalarCost += TTI.getScalarizationOverhead( 6225 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6226 APInt::getAllOnes(VF.getFixedValue()), true, false); 6227 ScalarCost += 6228 VF.getFixedValue() * 6229 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6230 } 6231 6232 // Compute the scalarization overhead of needed extractelement 6233 // instructions. For each of the instruction's operands, if the operand can 6234 // be scalarized, add it to the worklist; otherwise, account for the 6235 // overhead. 6236 for (Use &U : I->operands()) 6237 if (auto *J = dyn_cast<Instruction>(U.get())) { 6238 assert(VectorType::isValidElementType(J->getType()) && 6239 "Instruction has non-scalar type"); 6240 if (canBeScalarized(J)) 6241 Worklist.push_back(J); 6242 else if (needsExtract(J, VF)) { 6243 ScalarCost += TTI.getScalarizationOverhead( 6244 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6245 APInt::getAllOnes(VF.getFixedValue()), false, true); 6246 } 6247 } 6248 6249 // Scale the total scalar cost by block probability. 6250 ScalarCost /= getReciprocalPredBlockProb(); 6251 6252 // Compute the discount. A non-negative discount means the vector version 6253 // of the instruction costs more, and scalarizing would be beneficial. 6254 Discount += VectorCost - ScalarCost; 6255 ScalarCosts[I] = ScalarCost; 6256 } 6257 6258 return *Discount.getValue(); 6259 } 6260 6261 LoopVectorizationCostModel::VectorizationCostTy 6262 LoopVectorizationCostModel::expectedCost( 6263 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6264 VectorizationCostTy Cost; 6265 6266 // For each block. 6267 for (BasicBlock *BB : TheLoop->blocks()) { 6268 VectorizationCostTy BlockCost; 6269 6270 // For each instruction in the old loop. 6271 for (Instruction &I : BB->instructionsWithoutDebug()) { 6272 // Skip ignored values. 6273 if (ValuesToIgnore.count(&I) || 6274 (VF.isVector() && VecValuesToIgnore.count(&I))) 6275 continue; 6276 6277 VectorizationCostTy C = getInstructionCost(&I, VF); 6278 6279 // Check if we should override the cost. 6280 if (C.first.isValid() && 6281 ForceTargetInstructionCost.getNumOccurrences() > 0) 6282 C.first = InstructionCost(ForceTargetInstructionCost); 6283 6284 // Keep a list of instructions with invalid costs. 6285 if (Invalid && !C.first.isValid()) 6286 Invalid->emplace_back(&I, VF); 6287 6288 BlockCost.first += C.first; 6289 BlockCost.second |= C.second; 6290 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6291 << " for VF " << VF << " For instruction: " << I 6292 << '\n'); 6293 } 6294 6295 // If we are vectorizing a predicated block, it will have been 6296 // if-converted. This means that the block's instructions (aside from 6297 // stores and instructions that may divide by zero) will now be 6298 // unconditionally executed. For the scalar case, we may not always execute 6299 // the predicated block, if it is an if-else block. Thus, scale the block's 6300 // cost by the probability of executing it. 
blockNeedsPredication from 6301 // Legal is used so as to not include all blocks in tail folded loops. 6302 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6303 BlockCost.first /= getReciprocalPredBlockProb(); 6304 6305 Cost.first += BlockCost.first; 6306 Cost.second |= BlockCost.second; 6307 } 6308 6309 return Cost; 6310 } 6311 6312 /// Gets Address Access SCEV after verifying that the access pattern 6313 /// is loop invariant except the induction variable dependence. 6314 /// 6315 /// This SCEV can be sent to the Target in order to estimate the address 6316 /// calculation cost. 6317 static const SCEV *getAddressAccessSCEV( 6318 Value *Ptr, 6319 LoopVectorizationLegality *Legal, 6320 PredicatedScalarEvolution &PSE, 6321 const Loop *TheLoop) { 6322 6323 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6324 if (!Gep) 6325 return nullptr; 6326 6327 // We are looking for a gep with all loop invariant indices except for one 6328 // which should be an induction variable. 6329 auto SE = PSE.getSE(); 6330 unsigned NumOperands = Gep->getNumOperands(); 6331 for (unsigned i = 1; i < NumOperands; ++i) { 6332 Value *Opd = Gep->getOperand(i); 6333 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6334 !Legal->isInductionVariable(Opd)) 6335 return nullptr; 6336 } 6337 6338 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6339 return PSE.getSCEV(Ptr); 6340 } 6341 6342 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6343 return Legal->hasStride(I->getOperand(0)) || 6344 Legal->hasStride(I->getOperand(1)); 6345 } 6346 6347 InstructionCost 6348 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6349 ElementCount VF) { 6350 assert(VF.isVector() && 6351 "Scalarization cost of instruction implies vectorization."); 6352 if (VF.isScalable()) 6353 return InstructionCost::getInvalid(); 6354 6355 Type *ValTy = getLoadStoreType(I); 6356 auto SE = PSE.getSE(); 6357 6358 unsigned AS = getLoadStoreAddressSpace(I); 6359 Value *Ptr = getLoadStorePointerOperand(I); 6360 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6361 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6362 // that it is being called from this specific place. 6363 6364 // Figure out whether the access is strided and get the stride value 6365 // if it's known in compile time 6366 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6367 6368 // Get the cost of the scalar memory instruction and address computation. 6369 InstructionCost Cost = 6370 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6371 6372 // Don't pass *I here, since it is scalar but will actually be part of a 6373 // vectorized loop where the user of it is a vectorized instruction. 6374 const Align Alignment = getLoadStoreAlignment(I); 6375 Cost += VF.getKnownMinValue() * 6376 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6377 AS, TTI::TCK_RecipThroughput); 6378 6379 // Get the overhead of the extractelement and insertelement instructions 6380 // we might create due to scalarization. 6381 Cost += getScalarizationOverhead(I, VF); 6382 6383 // If we have a predicated load/store, it will need extra i1 extracts and 6384 // conditional branches, but may not be executed for each vector lane. Scale 6385 // the cost by the probability of executing the predicated block. 
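// E.g., with getReciprocalPredBlockProb() == 2 (i.e. assuming the predicated
// block executes on roughly half of the iterations), a scalarization cost of
// 8 becomes 4 before the i1 extract and branch overheads are added below.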
6386 if (isPredicatedInst(I, VF)) { 6387 Cost /= getReciprocalPredBlockProb(); 6388 6389 // Add the cost of an i1 extract and a branch 6390 auto *Vec_i1Ty = 6391 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6392 Cost += TTI.getScalarizationOverhead( 6393 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6394 /*Insert=*/false, /*Extract=*/true); 6395 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6396 6397 if (useEmulatedMaskMemRefHack(I, VF)) 6398 // Artificially setting to a high enough value to practically disable 6399 // vectorization with such operations. 6400 Cost = 3000000; 6401 } 6402 6403 return Cost; 6404 } 6405 6406 InstructionCost 6407 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6408 ElementCount VF) { 6409 Type *ValTy = getLoadStoreType(I); 6410 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6411 Value *Ptr = getLoadStorePointerOperand(I); 6412 unsigned AS = getLoadStoreAddressSpace(I); 6413 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6414 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6415 6416 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6417 "Stride should be 1 or -1 for consecutive memory access"); 6418 const Align Alignment = getLoadStoreAlignment(I); 6419 InstructionCost Cost = 0; 6420 if (Legal->isMaskRequired(I)) 6421 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6422 CostKind); 6423 else 6424 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6425 CostKind, I); 6426 6427 bool Reverse = ConsecutiveStride < 0; 6428 if (Reverse) 6429 Cost += 6430 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6431 return Cost; 6432 } 6433 6434 InstructionCost 6435 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6436 ElementCount VF) { 6437 assert(Legal->isUniformMemOp(*I)); 6438 6439 Type *ValTy = getLoadStoreType(I); 6440 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6441 const Align Alignment = getLoadStoreAlignment(I); 6442 unsigned AS = getLoadStoreAddressSpace(I); 6443 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6444 if (isa<LoadInst>(I)) { 6445 return TTI.getAddressComputationCost(ValTy) + 6446 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6447 CostKind) + 6448 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6449 } 6450 StoreInst *SI = cast<StoreInst>(I); 6451 6452 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6453 return TTI.getAddressComputationCost(ValTy) + 6454 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6455 CostKind) + 6456 (isLoopInvariantStoreValue 6457 ? 
0 6458 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6459 VF.getKnownMinValue() - 1)); 6460 } 6461 6462 InstructionCost 6463 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6464 ElementCount VF) { 6465 Type *ValTy = getLoadStoreType(I); 6466 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6467 const Align Alignment = getLoadStoreAlignment(I); 6468 const Value *Ptr = getLoadStorePointerOperand(I); 6469 6470 return TTI.getAddressComputationCost(VectorTy) + 6471 TTI.getGatherScatterOpCost( 6472 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6473 TargetTransformInfo::TCK_RecipThroughput, I); 6474 } 6475 6476 InstructionCost 6477 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6478 ElementCount VF) { 6479 // TODO: Once we have support for interleaving with scalable vectors 6480 // we can calculate the cost properly here. 6481 if (VF.isScalable()) 6482 return InstructionCost::getInvalid(); 6483 6484 Type *ValTy = getLoadStoreType(I); 6485 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6486 unsigned AS = getLoadStoreAddressSpace(I); 6487 6488 auto Group = getInterleavedAccessGroup(I); 6489 assert(Group && "Fail to get an interleaved access group."); 6490 6491 unsigned InterleaveFactor = Group->getFactor(); 6492 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6493 6494 // Holds the indices of existing members in the interleaved group. 6495 SmallVector<unsigned, 4> Indices; 6496 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6497 if (Group->getMember(IF)) 6498 Indices.push_back(IF); 6499 6500 // Calculate the cost of the whole interleaved group. 6501 bool UseMaskForGaps = 6502 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6503 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6504 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6505 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6506 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6507 6508 if (Group->isReverse()) { 6509 // TODO: Add support for reversed masked interleaved access. 6510 assert(!Legal->isMaskRequired(I) && 6511 "Reverse masked interleaved access not supported."); 6512 Cost += 6513 Group->getNumMembers() * 6514 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6515 } 6516 return Cost; 6517 } 6518 6519 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6520 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6521 using namespace llvm::PatternMatch; 6522 // Early exit for no in-loop reductions 6523 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6524 return None; 6525 auto *VectorTy = cast<VectorType>(Ty); 6526 6527 // We are looking for one of the following patterns, taking the one with the minimal acceptable cost: 6528 // reduce(mul(ext(A), ext(B))) or 6529 // reduce(mul(A, B)) or 6530 // reduce(ext(A)) or 6531 // reduce(A). 6532 // The basic idea is that we walk down the tree to do that, finding the root 6533 // reduction instruction in InLoopReductionImmediateChains. From there we find 6534 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6535 // of the components. If the reduction cost is lower, then we return it for the 6536 // reduction instruction and 0 for the other instructions in the pattern. If 6537 // it is not, we return an invalid cost specifying that the original cost method 6538 // should be used.
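// For example (illustrative): for an in-loop reduction
// 'add(phi, mul(sext(A), sext(B)))', the walk below steps from a sext to the
// mul to the add; if TTI reports a cheaper extended-add (MLA-style) reduction,
// the add is costed as the whole reduction and the matched mul/ext
// instructions are costed as 0.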
6539 Instruction *RetI = I; 6540 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6541 if (!RetI->hasOneUser()) 6542 return None; 6543 RetI = RetI->user_back(); 6544 } 6545 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6546 RetI->user_back()->getOpcode() == Instruction::Add) { 6547 if (!RetI->hasOneUser()) 6548 return None; 6549 RetI = RetI->user_back(); 6550 } 6551 6552 // Test if the found instruction is a reduction, and if not, return an invalid 6553 // cost specifying the parent to use the original cost modelling. 6554 if (!InLoopReductionImmediateChains.count(RetI)) 6555 return None; 6556 6557 // Find the reduction this chain is a part of and calculate the basic cost of 6558 // the reduction on its own. 6559 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6560 Instruction *ReductionPhi = LastChain; 6561 while (!isa<PHINode>(ReductionPhi)) 6562 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6563 6564 const RecurrenceDescriptor &RdxDesc = 6565 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second; 6566 6567 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 6568 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 6569 6570 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 6571 // normal fmul instruction to the cost of the fadd reduction. 6572 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 6573 BaseCost += 6574 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 6575 6576 // If we're using ordered reductions then we can just return the base cost 6577 // here, since getArithmeticReductionCost calculates the full ordered 6578 // reduction cost when FP reassociation is not allowed. 6579 if (useOrderedReductions(RdxDesc)) 6580 return BaseCost; 6581 6582 // Get the operand that was not the reduction chain and match it to one of the 6583 // patterns, returning the better cost if it is found. 6584 Instruction *RedOp = RetI->getOperand(1) == LastChain 6585 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6586 : dyn_cast<Instruction>(RetI->getOperand(1)); 6587 6588 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6589 6590 Instruction *Op0, *Op1; 6591 if (RedOp && 6592 match(RedOp, 6593 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 6594 match(Op0, m_ZExtOrSExt(m_Value())) && 6595 Op0->getOpcode() == Op1->getOpcode() && 6596 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6597 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 6598 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 6599 6600 // Matched reduce(ext(mul(ext(A), ext(B)))) 6601 // Note that the extend opcodes need to all match, or if A==B they will have 6602 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 6603 // which is equally fine.
6604 bool IsUnsigned = isa<ZExtInst>(Op0); 6605 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6606 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6607 6608 InstructionCost ExtCost = 6609 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6610 TTI::CastContextHint::None, CostKind, Op0); 6611 InstructionCost MulCost = 6612 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6613 InstructionCost Ext2Cost = 6614 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6615 TTI::CastContextHint::None, CostKind, RedOp); 6616 6617 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6618 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6619 CostKind); 6620 6621 if (RedCost.isValid() && 6622 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6623 return I == RetI ? RedCost : 0; 6624 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6625 !TheLoop->isLoopInvariant(RedOp)) { 6626 // Matched reduce(ext(A)) 6627 bool IsUnsigned = isa<ZExtInst>(RedOp); 6628 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6629 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6630 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6631 CostKind); 6632 6633 InstructionCost ExtCost = 6634 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6635 TTI::CastContextHint::None, CostKind, RedOp); 6636 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6637 return I == RetI ? RedCost : 0; 6638 } else if (RedOp && 6639 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6640 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6641 Op0->getOpcode() == Op1->getOpcode() && 6642 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6643 bool IsUnsigned = isa<ZExtInst>(Op0); 6644 Type *Op0Ty = Op0->getOperand(0)->getType(); 6645 Type *Op1Ty = Op1->getOperand(0)->getType(); 6646 Type *LargestOpTy = 6647 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6648 : Op0Ty; 6649 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6650 6651 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6652 // different sizes. We take the largest type as the ext to reduce, and add 6653 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6654 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6655 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6656 TTI::CastContextHint::None, CostKind, Op0); 6657 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6658 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6659 TTI::CastContextHint::None, CostKind, Op1); 6660 InstructionCost MulCost = 6661 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6662 6663 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6664 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6665 CostKind); 6666 InstructionCost ExtraExtCost = 0; 6667 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6668 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6669 ExtraExtCost = TTI.getCastInstrCost( 6670 ExtraExtOp->getOpcode(), ExtType, 6671 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6672 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6673 } 6674 6675 if (RedCost.isValid() && 6676 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6677 return I == RetI ? 
RedCost : 0; 6678 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6679 // Matched reduce(mul()) 6680 InstructionCost MulCost = 6681 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6682 6683 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6684 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6685 CostKind); 6686 6687 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6688 return I == RetI ? RedCost : 0; 6689 } 6690 } 6691 6692 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6693 } 6694 6695 InstructionCost 6696 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6697 ElementCount VF) { 6698 // Calculate scalar cost only. Vectorization cost should be ready at this 6699 // moment. 6700 if (VF.isScalar()) { 6701 Type *ValTy = getLoadStoreType(I); 6702 const Align Alignment = getLoadStoreAlignment(I); 6703 unsigned AS = getLoadStoreAddressSpace(I); 6704 6705 return TTI.getAddressComputationCost(ValTy) + 6706 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6707 TTI::TCK_RecipThroughput, I); 6708 } 6709 return getWideningCost(I, VF); 6710 } 6711 6712 LoopVectorizationCostModel::VectorizationCostTy 6713 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6714 ElementCount VF) { 6715 // If we know that this instruction will remain uniform, check the cost of 6716 // the scalar version. 6717 if (isUniformAfterVectorization(I, VF)) 6718 VF = ElementCount::getFixed(1); 6719 6720 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6721 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6722 6723 // Forced scalars do not have any scalarization overhead. 6724 auto ForcedScalar = ForcedScalars.find(VF); 6725 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6726 auto InstSet = ForcedScalar->second; 6727 if (InstSet.count(I)) 6728 return VectorizationCostTy( 6729 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6730 VF.getKnownMinValue()), 6731 false); 6732 } 6733 6734 Type *VectorTy; 6735 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6736 6737 bool TypeNotScalarized = false; 6738 if (VF.isVector() && VectorTy->isVectorTy()) { 6739 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 6740 if (NumParts) 6741 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6742 else 6743 C = InstructionCost::getInvalid(); 6744 } 6745 return VectorizationCostTy(C, TypeNotScalarized); 6746 } 6747 6748 InstructionCost 6749 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6750 ElementCount VF) const { 6751 6752 // There is no mechanism yet to create a scalable scalarization loop, 6753 // so this is currently Invalid. 6754 if (VF.isScalable()) 6755 return InstructionCost::getInvalid(); 6756 6757 if (VF.isScalar()) 6758 return 0; 6759 6760 InstructionCost Cost = 0; 6761 Type *RetTy = ToVectorTy(I->getType(), VF); 6762 if (!RetTy->isVoidTy() && 6763 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6764 Cost += TTI.getScalarizationOverhead( 6765 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6766 false); 6767 6768 // Some targets keep addresses scalar. 6769 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6770 return Cost; 6771 6772 // Some targets support efficient element stores. 6773 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6774 return Cost; 6775 6776 // Collect operands to consider. 6777 CallInst *CI = dyn_cast<CallInst>(I); 6778 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 6779 6780 // Skip operands that do not require extraction/scalarization and do not incur 6781 // any overhead. 6782 SmallVector<Type *> Tys; 6783 for (auto *V : filterExtractingOperands(Ops, VF)) 6784 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6785 return Cost + TTI.getOperandsScalarizationOverhead( 6786 filterExtractingOperands(Ops, VF), Tys); 6787 } 6788 6789 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6790 if (VF.isScalar()) 6791 return; 6792 NumPredStores = 0; 6793 for (BasicBlock *BB : TheLoop->blocks()) { 6794 // For each instruction in the old loop. 6795 for (Instruction &I : *BB) { 6796 Value *Ptr = getLoadStorePointerOperand(&I); 6797 if (!Ptr) 6798 continue; 6799 6800 // TODO: We should generate better code and update the cost model for 6801 // predicated uniform stores. Today they are treated as any other 6802 // predicated store (see added test cases in 6803 // invariant-store-vectorization.ll). 6804 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6805 NumPredStores++; 6806 6807 if (Legal->isUniformMemOp(I)) { 6808 // TODO: Avoid replicating loads and stores instead of 6809 // relying on instcombine to remove them. 6810 // Load: Scalar load + broadcast 6811 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6812 InstructionCost Cost; 6813 if (isa<StoreInst>(&I) && VF.isScalable() && 6814 isLegalGatherOrScatter(&I, VF)) { 6815 Cost = getGatherScatterCost(&I, VF); 6816 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 6817 } else { 6818 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 6819 "Cannot yet scalarize uniform stores"); 6820 Cost = getUniformMemOpCost(&I, VF); 6821 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6822 } 6823 continue; 6824 } 6825 6826 // We assume that widening is the best solution when possible. 6827 if (memoryInstructionCanBeWidened(&I, VF)) { 6828 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6829 int ConsecutiveStride = Legal->isConsecutivePtr( 6830 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6831 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6832 "Expected consecutive stride."); 6833 InstWidening Decision = 6834 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6835 setWideningDecision(&I, VF, Decision, Cost); 6836 continue; 6837 } 6838 6839 // Choose between Interleaving, Gather/Scatter or Scalarization. 6840 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6841 unsigned NumAccesses = 1; 6842 if (isAccessInterleaved(&I)) { 6843 auto Group = getInterleavedAccessGroup(&I); 6844 assert(Group && "Fail to get an interleaved access group."); 6845 6846 // Make one decision for the whole group. 6847 if (getWideningDecision(&I, VF) != CM_Unknown) 6848 continue; 6849 6850 NumAccesses = Group->getNumMembers(); 6851 if (interleavedAccessCanBeWidened(&I, VF)) 6852 InterleaveCost = getInterleaveGroupCost(&I, VF); 6853 } 6854 6855 InstructionCost GatherScatterCost = 6856 isLegalGatherOrScatter(&I, VF) 6857 ? getGatherScatterCost(&I, VF) * NumAccesses 6858 : InstructionCost::getInvalid(); 6859 6860 InstructionCost ScalarizationCost = 6861 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6862 6863 // Choose better solution for the current VF, 6864 // write down this decision and use it during vectorization. 
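// E.g. (illustrative costs): InterleaveCost = 6, GatherScatterCost = 8 and
// ScalarizationCost = 12 selects CM_Interleave below; an invalid
// InterleaveCost never compares as cheapest, so the decision falls through
// to gather/scatter or scalarization.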
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
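      // Illustrative note (not from the original source): typical members of
      // AddrDefs reaching this branch are GEPs or integer arithmetic feeding
      // only addresses. Recording them as forced scalars means their cost is
      // later taken as the scalar cost times VF, with no insert/extract
      // scalarization overhead added (see the ForcedScalars handling in
      // getInstructionCost).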
6945 ForcedScalars[VF].insert(I); 6946 } 6947 } 6948 6949 InstructionCost 6950 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 6951 Type *&VectorTy) { 6952 Type *RetTy = I->getType(); 6953 if (canTruncateToMinimalBitwidth(I, VF)) 6954 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6955 auto SE = PSE.getSE(); 6956 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6957 6958 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 6959 ElementCount VF) -> bool { 6960 if (VF.isScalar()) 6961 return true; 6962 6963 auto Scalarized = InstsToScalarize.find(VF); 6964 assert(Scalarized != InstsToScalarize.end() && 6965 "VF not yet analyzed for scalarization profitability"); 6966 return !Scalarized->second.count(I) && 6967 llvm::all_of(I->users(), [&](User *U) { 6968 auto *UI = cast<Instruction>(U); 6969 return !Scalarized->second.count(UI); 6970 }); 6971 }; 6972 (void) hasSingleCopyAfterVectorization; 6973 6974 if (isScalarAfterVectorization(I, VF)) { 6975 // With the exception of GEPs and PHIs, after scalarization there should 6976 // only be one copy of the instruction generated in the loop. This is 6977 // because the VF is either 1, or any instructions that need scalarizing 6978 // have already been dealt with by the the time we get here. As a result, 6979 // it means we don't have to multiply the instruction cost by VF. 6980 assert(I->getOpcode() == Instruction::GetElementPtr || 6981 I->getOpcode() == Instruction::PHI || 6982 (I->getOpcode() == Instruction::BitCast && 6983 I->getType()->isPointerTy()) || 6984 hasSingleCopyAfterVectorization(I, VF)); 6985 VectorTy = RetTy; 6986 } else 6987 VectorTy = ToVectorTy(RetTy, VF); 6988 6989 // TODO: We need to estimate the cost of intrinsic calls. 6990 switch (I->getOpcode()) { 6991 case Instruction::GetElementPtr: 6992 // We mark this instruction as zero-cost because the cost of GEPs in 6993 // vectorized code depends on whether the corresponding memory instruction 6994 // is scalarized or not. Therefore, we handle GEPs with the memory 6995 // instruction cost. 6996 return 0; 6997 case Instruction::Br: { 6998 // In cases of scalarized and predicated instructions, there will be VF 6999 // predicated blocks in the vectorized loop. Each branch around these 7000 // blocks requires also an extract of its vector compare i1 element. 7001 bool ScalarPredicatedBB = false; 7002 BranchInst *BI = cast<BranchInst>(I); 7003 if (VF.isVector() && BI->isConditional() && 7004 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7005 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7006 ScalarPredicatedBB = true; 7007 7008 if (ScalarPredicatedBB) { 7009 // Not possible to scalarize scalable vector with predicated instructions. 7010 if (VF.isScalable()) 7011 return InstructionCost::getInvalid(); 7012 // Return cost for branches around scalarized and predicated blocks. 7013 auto *Vec_i1Ty = 7014 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7015 return ( 7016 TTI.getScalarizationOverhead( 7017 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7018 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7019 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7020 // The back-edge branch will remain, as will all scalar branches. 7021 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7022 else 7023 // This branch will be eliminated by if-conversion. 
7024 return 0; 7025 // Note: We currently assume zero cost for an unconditional branch inside 7026 // a predicated block since it will become a fall-through, although we 7027 // may decide in the future to call TTI for all branches. 7028 } 7029 case Instruction::PHI: { 7030 auto *Phi = cast<PHINode>(I); 7031 7032 // First-order recurrences are replaced by vector shuffles inside the loop. 7033 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7034 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7035 return TTI.getShuffleCost( 7036 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7037 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7038 7039 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7040 // converted into select instructions. We require N - 1 selects per phi 7041 // node, where N is the number of incoming values. 7042 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7043 return (Phi->getNumIncomingValues() - 1) * 7044 TTI.getCmpSelInstrCost( 7045 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7046 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7047 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7048 7049 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7050 } 7051 case Instruction::UDiv: 7052 case Instruction::SDiv: 7053 case Instruction::URem: 7054 case Instruction::SRem: 7055 // If we have a predicated instruction, it may not be executed for each 7056 // vector lane. Get the scalarization cost and scale this amount by the 7057 // probability of executing the predicated block. If the instruction is not 7058 // predicated, we fall through to the next case. 7059 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7060 InstructionCost Cost = 0; 7061 7062 // These instructions have a non-void type, so account for the phi nodes 7063 // that we will create. This cost is likely to be zero. The phi node 7064 // cost, if any, should be scaled by the block probability because it 7065 // models a copy at the end of each predicated block. 7066 Cost += VF.getKnownMinValue() * 7067 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7068 7069 // The cost of the non-predicated instruction. 7070 Cost += VF.getKnownMinValue() * 7071 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7072 7073 // The cost of insertelement and extractelement instructions needed for 7074 // scalarization. 7075 Cost += getScalarizationOverhead(I, VF); 7076 7077 // Scale the cost by the probability of executing the predicated blocks. 7078 // This assumes the predicated block for each vector lane is equally 7079 // likely. 7080 return Cost / getReciprocalPredBlockProb(); 7081 } 7082 LLVM_FALLTHROUGH; 7083 case Instruction::Add: 7084 case Instruction::FAdd: 7085 case Instruction::Sub: 7086 case Instruction::FSub: 7087 case Instruction::Mul: 7088 case Instruction::FMul: 7089 case Instruction::FDiv: 7090 case Instruction::FRem: 7091 case Instruction::Shl: 7092 case Instruction::LShr: 7093 case Instruction::AShr: 7094 case Instruction::And: 7095 case Instruction::Or: 7096 case Instruction::Xor: { 7097 // Since we will replace the stride by 1 the multiplication should go away. 
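    // Illustrative example (not from the original source): for an address
    // computation such as
    //   %offset = mul i64 %iv, %stride
    // in a loop that has been versioned under the assumption %stride == 1,
    // the multiply folds away once the stride is replaced, so it is costed
    // at zero here.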
7098 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7099 return 0; 7100 7101 // Detect reduction patterns 7102 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7103 return *RedCost; 7104 7105 // Certain instructions can be cheaper to vectorize if they have a constant 7106 // second vector operand. One example of this are shifts on x86. 7107 Value *Op2 = I->getOperand(1); 7108 TargetTransformInfo::OperandValueProperties Op2VP; 7109 TargetTransformInfo::OperandValueKind Op2VK = 7110 TTI.getOperandInfo(Op2, Op2VP); 7111 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7112 Op2VK = TargetTransformInfo::OK_UniformValue; 7113 7114 SmallVector<const Value *, 4> Operands(I->operand_values()); 7115 return TTI.getArithmeticInstrCost( 7116 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7117 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7118 } 7119 case Instruction::FNeg: { 7120 return TTI.getArithmeticInstrCost( 7121 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7122 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7123 TargetTransformInfo::OP_None, I->getOperand(0), I); 7124 } 7125 case Instruction::Select: { 7126 SelectInst *SI = cast<SelectInst>(I); 7127 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7128 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7129 7130 const Value *Op0, *Op1; 7131 using namespace llvm::PatternMatch; 7132 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7133 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7134 // select x, y, false --> x & y 7135 // select x, true, y --> x | y 7136 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7137 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7138 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7139 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7140 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7141 Op1->getType()->getScalarSizeInBits() == 1); 7142 7143 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7144 return TTI.getArithmeticInstrCost( 7145 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7146 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7147 } 7148 7149 Type *CondTy = SI->getCondition()->getType(); 7150 if (!ScalarCond) 7151 CondTy = VectorType::get(CondTy, VF); 7152 7153 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7154 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7155 Pred = Cmp->getPredicate(); 7156 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7157 CostKind, I); 7158 } 7159 case Instruction::ICmp: 7160 case Instruction::FCmp: { 7161 Type *ValTy = I->getOperand(0)->getType(); 7162 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7163 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7164 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7165 VectorTy = ToVectorTy(ValTy, VF); 7166 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7167 cast<CmpInst>(I)->getPredicate(), CostKind, 7168 I); 7169 } 7170 case Instruction::Store: 7171 case Instruction::Load: { 7172 ElementCount Width = VF; 7173 if (Width.isVector()) { 7174 InstWidening Decision = getWideningDecision(I, Width); 7175 assert(Decision != CM_Unknown && 7176 "CM decision should be taken at this point"); 7177 if (Decision == CM_Scalarize) 7178 Width = ElementCount::getFixed(1); 7179 } 7180 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7181 return getMemoryInstructionCost(I, VF); 7182 } 7183 case Instruction::BitCast: 7184 if (I->getType()->isPointerTy()) 7185 return 0; 7186 LLVM_FALLTHROUGH; 7187 case Instruction::ZExt: 7188 case Instruction::SExt: 7189 case Instruction::FPToUI: 7190 case Instruction::FPToSI: 7191 case Instruction::FPExt: 7192 case Instruction::PtrToInt: 7193 case Instruction::IntToPtr: 7194 case Instruction::SIToFP: 7195 case Instruction::UIToFP: 7196 case Instruction::Trunc: 7197 case Instruction::FPTrunc: { 7198 // Computes the CastContextHint from a Load/Store instruction. 7199 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7200 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7201 "Expected a load or a store!"); 7202 7203 if (VF.isScalar() || !TheLoop->contains(I)) 7204 return TTI::CastContextHint::Normal; 7205 7206 switch (getWideningDecision(I, VF)) { 7207 case LoopVectorizationCostModel::CM_GatherScatter: 7208 return TTI::CastContextHint::GatherScatter; 7209 case LoopVectorizationCostModel::CM_Interleave: 7210 return TTI::CastContextHint::Interleave; 7211 case LoopVectorizationCostModel::CM_Scalarize: 7212 case LoopVectorizationCostModel::CM_Widen: 7213 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7214 : TTI::CastContextHint::Normal; 7215 case LoopVectorizationCostModel::CM_Widen_Reverse: 7216 return TTI::CastContextHint::Reversed; 7217 case LoopVectorizationCostModel::CM_Unknown: 7218 llvm_unreachable("Instr did not go through cost modelling?"); 7219 } 7220 7221 llvm_unreachable("Unhandled case!"); 7222 }; 7223 7224 unsigned Opcode = I->getOpcode(); 7225 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7226 // For Trunc, the context is the only user, which must be a StoreInst. 7227 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7228 if (I->hasOneUse()) 7229 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7230 CCH = ComputeCCH(Store); 7231 } 7232 // For Z/Sext, the context is the operand, which must be a LoadInst. 
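    // Illustrative example (not from the original source): for
    //   %l = load i16, i16* %p
    //   %e = zext i16 %l to i32
    // the cast context is derived from how %l will be widened, e.g. a
    // CM_Widen_Reverse decision yields CastContextHint::Reversed, letting the
    // target price a combined reversed-load-plus-extend if it has one.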
7233 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7234 Opcode == Instruction::FPExt) { 7235 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7236 CCH = ComputeCCH(Load); 7237 } 7238 7239 // We optimize the truncation of induction variables having constant 7240 // integer steps. The cost of these truncations is the same as the scalar 7241 // operation. 7242 if (isOptimizableIVTruncate(I, VF)) { 7243 auto *Trunc = cast<TruncInst>(I); 7244 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7245 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7246 } 7247 7248 // Detect reduction patterns 7249 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7250 return *RedCost; 7251 7252 Type *SrcScalarTy = I->getOperand(0)->getType(); 7253 Type *SrcVecTy = 7254 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7255 if (canTruncateToMinimalBitwidth(I, VF)) { 7256 // This cast is going to be shrunk. This may remove the cast or it might 7257 // turn it into slightly different cast. For example, if MinBW == 16, 7258 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7259 // 7260 // Calculate the modified src and dest types. 7261 Type *MinVecTy = VectorTy; 7262 if (Opcode == Instruction::Trunc) { 7263 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7264 VectorTy = 7265 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7266 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7267 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7268 VectorTy = 7269 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7270 } 7271 } 7272 7273 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7274 } 7275 case Instruction::Call: { 7276 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7277 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7278 return *RedCost; 7279 bool NeedToScalarize; 7280 CallInst *CI = cast<CallInst>(I); 7281 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7282 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7283 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7284 return std::min(CallCost, IntrinsicCost); 7285 } 7286 return CallCost; 7287 } 7288 case Instruction::ExtractValue: 7289 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7290 case Instruction::Alloca: 7291 // We cannot easily widen alloca to a scalable alloca, as 7292 // the result would need to be a vector of pointers. 7293 if (VF.isScalable()) 7294 return InstructionCost::getInvalid(); 7295 LLVM_FALLTHROUGH; 7296 default: 7297 // This opcode is unknown. Assume that it is the same as 'mul'. 7298 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7299 } // end of switch. 
7300 } 7301 7302 char LoopVectorize::ID = 0; 7303 7304 static const char lv_name[] = "Loop Vectorization"; 7305 7306 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7307 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7308 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7309 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7310 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7311 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7312 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7313 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7314 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7315 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7316 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7317 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7318 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7319 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7320 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7321 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7322 7323 namespace llvm { 7324 7325 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7326 7327 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7328 bool VectorizeOnlyWhenForced) { 7329 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7330 } 7331 7332 } // end namespace llvm 7333 7334 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7335 // Check if the pointer operand of a load or store instruction is 7336 // consecutive. 7337 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7338 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7339 return false; 7340 } 7341 7342 void LoopVectorizationCostModel::collectValuesToIgnore() { 7343 // Ignore ephemeral values. 7344 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7345 7346 // Find all stores to invariant variables. Since they are going to sink 7347 // outside the loop we do not need calculate cost for them. 7348 for (BasicBlock *BB : TheLoop->blocks()) 7349 for (Instruction &I : *BB) { 7350 StoreInst *SI; 7351 if ((SI = dyn_cast<StoreInst>(&I)) && 7352 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) 7353 ValuesToIgnore.insert(&I); 7354 } 7355 7356 // Ignore type-promoting instructions we identified during reduction 7357 // detection. 7358 for (auto &Reduction : Legal->getReductionVars()) { 7359 const RecurrenceDescriptor &RedDes = Reduction.second; 7360 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7361 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7362 } 7363 // Ignore type-casting instructions we identified during induction 7364 // detection. 7365 for (auto &Induction : Legal->getInductionVars()) { 7366 const InductionDescriptor &IndDes = Induction.second; 7367 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7368 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7369 } 7370 } 7371 7372 void LoopVectorizationCostModel::collectInLoopReductions() { 7373 for (auto &Reduction : Legal->getReductionVars()) { 7374 PHINode *Phi = Reduction.first; 7375 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7376 7377 // We don't collect reductions that are type promoted (yet). 7378 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7379 continue; 7380 7381 // If the target would prefer this reduction to happen "in-loop", then we 7382 // want to record it as such. 
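    // Illustrative example (not from the original source): for an integer sum
    //   %red = phi i32 [ 0, %ph ], [ %red.next, %loop ]
    //   %red.next = add i32 %red, %val
    // the reduction op chain is { %red.next }. Recording it as in-loop means
    // each vector iteration reduces the widened %val into a scalar
    // accumulator, instead of keeping a vector accumulator that is reduced
    // once after the loop.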
7383 unsigned Opcode = RdxDesc.getOpcode(); 7384 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7385 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7386 TargetTransformInfo::ReductionFlags())) 7387 continue; 7388 7389 // Check that we can correctly put the reductions into the loop, by 7390 // finding the chain of operations that leads from the phi to the loop 7391 // exit value. 7392 SmallVector<Instruction *, 4> ReductionOperations = 7393 RdxDesc.getReductionOpChain(Phi, TheLoop); 7394 bool InLoop = !ReductionOperations.empty(); 7395 if (InLoop) { 7396 InLoopReductionChains[Phi] = ReductionOperations; 7397 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7398 Instruction *LastChain = Phi; 7399 for (auto *I : ReductionOperations) { 7400 InLoopReductionImmediateChains[I] = LastChain; 7401 LastChain = I; 7402 } 7403 } 7404 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7405 << " reduction for phi: " << *Phi << "\n"); 7406 } 7407 } 7408 7409 // TODO: we could return a pair of values that specify the max VF and 7410 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7411 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7412 // doesn't have a cost model that can choose which plan to execute if 7413 // more than one is generated. 7414 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7415 LoopVectorizationCostModel &CM) { 7416 unsigned WidestType; 7417 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7418 return WidestVectorRegBits / WidestType; 7419 } 7420 7421 VectorizationFactor 7422 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7423 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7424 ElementCount VF = UserVF; 7425 // Outer loop handling: They may require CFG and instruction level 7426 // transformations before even evaluating whether vectorization is profitable. 7427 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7428 // the vectorization pipeline. 7429 if (!OrigLoop->isInnermost()) { 7430 // If the user doesn't provide a vectorization factor, determine a 7431 // reasonable one. 7432 if (UserVF.isZero()) { 7433 VF = ElementCount::getFixed(determineVPlanVF( 7434 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7435 .getFixedSize(), 7436 CM)); 7437 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7438 7439 // Make sure we have a VF > 1 for stress testing. 7440 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7441 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7442 << "overriding computed VF.\n"); 7443 VF = ElementCount::getFixed(4); 7444 } 7445 } 7446 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7447 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7448 "VF needs to be a power of two"); 7449 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7450 << "VF " << VF << " to build VPlans.\n"); 7451 buildVPlans(VF, VF); 7452 7453 // For VPlan build stress testing, we bail out after VPlan construction. 7454 if (VPlanBuildStressTest) 7455 return VectorizationFactor::Disabled(); 7456 7457 return {VF, 0 /*Cost*/}; 7458 } 7459 7460 LLVM_DEBUG( 7461 dbgs() << "LV: Not vectorizing. 
Inner loops aren't supported in the " 7462 "VPlan-native path.\n"); 7463 return VectorizationFactor::Disabled(); 7464 } 7465 7466 bool LoopVectorizationPlanner::requiresTooManyRuntimeChecks() const { 7467 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7468 return (NumRuntimePointerChecks > 7469 VectorizerParams::RuntimeMemoryCheckThreshold && 7470 !Hints.allowReordering()) || 7471 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7472 } 7473 7474 Optional<VectorizationFactor> 7475 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7476 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7477 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7478 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7479 return None; 7480 7481 // Invalidate interleave groups if all blocks of loop will be predicated. 7482 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7483 !useMaskedInterleavedAccesses(*TTI)) { 7484 LLVM_DEBUG( 7485 dbgs() 7486 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7487 "which requires masked-interleaved support.\n"); 7488 if (CM.InterleaveInfo.invalidateGroups()) 7489 // Invalidating interleave groups also requires invalidating all decisions 7490 // based on them, which includes widening decisions and uniform and scalar 7491 // values. 7492 CM.invalidateCostModelingDecisions(); 7493 } 7494 7495 ElementCount MaxUserVF = 7496 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7497 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7498 if (!UserVF.isZero() && UserVFIsLegal) { 7499 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7500 "VF needs to be a power of two"); 7501 // Collect the instructions (and their associated costs) that will be more 7502 // profitable to scalarize. 7503 if (CM.selectUserVectorizationFactor(UserVF)) { 7504 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7505 CM.collectInLoopReductions(); 7506 buildVPlansWithVPRecipes(UserVF, UserVF); 7507 LLVM_DEBUG(printPlans(dbgs())); 7508 return {{UserVF, 0}}; 7509 } else 7510 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7511 "InvalidCost", ORE, OrigLoop); 7512 } 7513 7514 // Populate the set of Vectorization Factor Candidates. 7515 ElementCountSet VFCandidates; 7516 for (auto VF = ElementCount::getFixed(1); 7517 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7518 VFCandidates.insert(VF); 7519 for (auto VF = ElementCount::getScalable(1); 7520 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7521 VFCandidates.insert(VF); 7522 7523 for (const auto &VF : VFCandidates) { 7524 // Collect Uniform and Scalar instructions after vectorization with VF. 7525 CM.collectUniformsAndScalars(VF); 7526 7527 // Collect the instructions (and their associated costs) that will be more 7528 // profitable to scalarize. 7529 if (VF.isVector()) 7530 CM.collectInstsToScalarize(VF); 7531 } 7532 7533 CM.collectInLoopReductions(); 7534 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7535 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7536 7537 LLVM_DEBUG(printPlans(dbgs())); 7538 if (!MaxFactors.hasVector()) 7539 return VectorizationFactor::Disabled(); 7540 7541 // Select the optimal vectorization factor. 
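  // Illustrative example (not from the original source): with
  // MaxFactors.FixedVF = 16 and MaxFactors.ScalableVF = vscale x 4,
  // VFCandidates holds {1, 2, 4, 8, 16, vscale x 1, vscale x 2, vscale x 4},
  // and the cost model picks the most profitable member (possibly VF = 1,
  // i.e. no vectorization).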
7542 return CM.selectVectorizationFactor(VFCandidates); 7543 } 7544 7545 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7546 assert(count_if(VPlans, 7547 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7548 1 && 7549 "Best VF has not a single VPlan."); 7550 7551 for (const VPlanPtr &Plan : VPlans) { 7552 if (Plan->hasVF(VF)) 7553 return *Plan.get(); 7554 } 7555 llvm_unreachable("No plan found!"); 7556 } 7557 7558 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7559 SmallVector<Metadata *, 4> MDs; 7560 // Reserve first location for self reference to the LoopID metadata node. 7561 MDs.push_back(nullptr); 7562 bool IsUnrollMetadata = false; 7563 MDNode *LoopID = L->getLoopID(); 7564 if (LoopID) { 7565 // First find existing loop unrolling disable metadata. 7566 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7567 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7568 if (MD) { 7569 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7570 IsUnrollMetadata = 7571 S && S->getString().startswith("llvm.loop.unroll.disable"); 7572 } 7573 MDs.push_back(LoopID->getOperand(i)); 7574 } 7575 } 7576 7577 if (!IsUnrollMetadata) { 7578 // Add runtime unroll disable metadata. 7579 LLVMContext &Context = L->getHeader()->getContext(); 7580 SmallVector<Metadata *, 1> DisableOperands; 7581 DisableOperands.push_back( 7582 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7583 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7584 MDs.push_back(DisableNode); 7585 MDNode *NewLoopID = MDNode::get(Context, MDs); 7586 // Set operand 0 to refer to the loop id itself. 7587 NewLoopID->replaceOperandWith(0, NewLoopID); 7588 L->setLoopID(NewLoopID); 7589 } 7590 } 7591 7592 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7593 VPlan &BestVPlan, 7594 InnerLoopVectorizer &ILV, 7595 DominatorTree *DT) { 7596 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7597 << '\n'); 7598 7599 // Perform the actual loop transformation. 7600 7601 // 1. Set up the skeleton for vectorization, including vector pre-header and 7602 // middle block. The vector loop is created during VPlan execution. 7603 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7604 Value *CanonicalIVStartValue; 7605 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) = 7606 ILV.createVectorizedLoopSkeleton(); 7607 ILV.collectPoisonGeneratingRecipes(State); 7608 7609 ILV.printDebugTracesAtStart(); 7610 7611 //===------------------------------------------------===// 7612 // 7613 // Notice: any optimization or new instruction that go 7614 // into the code below should also be implemented in 7615 // the cost-model. 7616 // 7617 //===------------------------------------------------===// 7618 7619 // 2. Copy and widen instructions from the old loop into the new loop. 7620 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), 7621 ILV.getOrCreateVectorTripCount(nullptr), 7622 CanonicalIVStartValue, State); 7623 BestVPlan.execute(&State); 7624 7625 // Keep all loop hints from the original loop on the vector loop (we'll 7626 // replace the vectorizer-specific hints below). 
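  // Illustrative note (not from the original source): if the original loop
  // carries follow-up metadata (see LLVMLoopVectorizeFollowupAll and
  // LLVMLoopVectorizeFollowupVectorized below), makeFollowupLoopID transfers
  // it to the vector loop; otherwise the original loop ID is copied over and
  // Hints.setAlreadyVectorized() marks the vector loop so it is not
  // considered for vectorization again.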
7627 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7628 7629 Optional<MDNode *> VectorizedLoopID = 7630 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7631 LLVMLoopVectorizeFollowupVectorized}); 7632 7633 VPBasicBlock *HeaderVPBB = 7634 BestVPlan.getVectorLoopRegion()->getEntryBasicBlock(); 7635 Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]); 7636 if (VectorizedLoopID.hasValue()) 7637 L->setLoopID(VectorizedLoopID.getValue()); 7638 else { 7639 // Keep all loop hints from the original loop on the vector loop (we'll 7640 // replace the vectorizer-specific hints below). 7641 if (MDNode *LID = OrigLoop->getLoopID()) 7642 L->setLoopID(LID); 7643 7644 LoopVectorizeHints Hints(L, true, *ORE); 7645 Hints.setAlreadyVectorized(); 7646 } 7647 // Disable runtime unrolling when vectorizing the epilogue loop. 7648 if (CanonicalIVStartValue) 7649 AddRuntimeUnrollDisableMetaData(L); 7650 7651 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7652 // predication, updating analyses. 7653 ILV.fixVectorizedLoop(State, BestVPlan); 7654 7655 ILV.printDebugTracesAtEnd(); 7656 } 7657 7658 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 7659 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 7660 for (const auto &Plan : VPlans) 7661 if (PrintVPlansInDotFormat) 7662 Plan->printDOT(O); 7663 else 7664 Plan->print(O); 7665 } 7666 #endif 7667 7668 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7669 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7670 7671 // We create new control-flow for the vectorized loop, so the original exit 7672 // conditions will be dead after vectorization if it's only used by the 7673 // terminator 7674 SmallVector<BasicBlock*> ExitingBlocks; 7675 OrigLoop->getExitingBlocks(ExitingBlocks); 7676 for (auto *BB : ExitingBlocks) { 7677 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7678 if (!Cmp || !Cmp->hasOneUse()) 7679 continue; 7680 7681 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7682 if (!DeadInstructions.insert(Cmp).second) 7683 continue; 7684 7685 // The operands of the icmp is often a dead trunc, used by IndUpdate. 7686 // TODO: can recurse through operands in general 7687 for (Value *Op : Cmp->operands()) { 7688 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7689 DeadInstructions.insert(cast<Instruction>(Op)); 7690 } 7691 } 7692 7693 // We create new "steps" for induction variable updates to which the original 7694 // induction variables map. An original update instruction will be dead if 7695 // all its users except the induction variable are dead. 7696 auto *Latch = OrigLoop->getLoopLatch(); 7697 for (auto &Induction : Legal->getInductionVars()) { 7698 PHINode *Ind = Induction.first; 7699 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7700 7701 // If the tail is to be folded by masking, the primary induction variable, 7702 // if exists, isn't dead: it will be used for masking. Don't kill it. 
7703 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7704 continue; 7705 7706 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7707 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7708 })) 7709 DeadInstructions.insert(IndUpdate); 7710 } 7711 } 7712 7713 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7714 7715 //===--------------------------------------------------------------------===// 7716 // EpilogueVectorizerMainLoop 7717 //===--------------------------------------------------------------------===// 7718 7719 /// This function is partially responsible for generating the control flow 7720 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7721 std::pair<BasicBlock *, Value *> 7722 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7723 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7724 7725 // Workaround! Compute the trip count of the original loop and cache it 7726 // before we start modifying the CFG. This code has a systemic problem 7727 // wherein it tries to run analysis over partially constructed IR; this is 7728 // wrong, and not simply for SCEV. The trip count of the original loop 7729 // simply happens to be prone to hitting this in practice. In theory, we 7730 // can hit the same issue for any SCEV, or ValueTracking query done during 7731 // mutation. See PR49900. 7732 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 7733 createVectorLoopSkeleton(""); 7734 7735 // Generate the code to check the minimum iteration count of the vector 7736 // epilogue (see below). 7737 EPI.EpilogueIterationCountCheck = 7738 emitIterationCountCheck(LoopScalarPreHeader, true); 7739 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7740 7741 // Generate the code to check any assumptions that we've made for SCEV 7742 // expressions. 7743 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7744 7745 // Generate the code that checks at runtime if arrays overlap. We put the 7746 // checks into a separate block to make the more common case of few elements 7747 // faster. 7748 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7749 7750 // Generate the iteration count check for the main loop, *after* the check 7751 // for the epilogue loop, so that the path-length is shorter for the case 7752 // that goes directly through the vector epilogue. The longer-path length for 7753 // the main loop is compensated for, by the gain from vectorizing the larger 7754 // trip count. Note: the branch will get updated later on when we vectorize 7755 // the epilogue. 7756 EPI.MainLoopIterationCountCheck = 7757 emitIterationCountCheck(LoopScalarPreHeader, false); 7758 7759 // Generate the induction variable. 7760 EPI.VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 7761 7762 // Skip induction resume value creation here because they will be created in 7763 // the second pass. If we created them here, they wouldn't be used anyway, 7764 // because the vplan in the second pass still contains the inductions from the 7765 // original loop. 
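  // Rough sketch (illustrative, not from the original source) of the check
  // blocks created above, in emission order; each branches to the scalar
  // preheader when it fails, and the main-loop check is re-targeted at the
  // vector epilogue during the second pass:
  //
  //   iter.check                          (enough iterations for the epilogue VF?)
  //     -> SCEV checks                    (SCEV assumptions hold?)
  //       -> mem checks                   (no runtime aliasing?)
  //         -> vector.main.loop.iter.check (enough iterations for main VF * UF?)
  //           -> vector.ph -> main vector loop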
7766 7767 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7768 } 7769 7770 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7771 LLVM_DEBUG({ 7772 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7773 << "Main Loop VF:" << EPI.MainLoopVF 7774 << ", Main Loop UF:" << EPI.MainLoopUF 7775 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7776 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7777 }); 7778 } 7779 7780 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7781 DEBUG_WITH_TYPE(VerboseDebug, { 7782 dbgs() << "intermediate fn:\n" 7783 << *OrigLoop->getHeader()->getParent() << "\n"; 7784 }); 7785 } 7786 7787 BasicBlock * 7788 EpilogueVectorizerMainLoop::emitIterationCountCheck(BasicBlock *Bypass, 7789 bool ForEpilogue) { 7790 assert(Bypass && "Expected valid bypass basic block."); 7791 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 7792 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7793 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7794 // Reuse existing vector loop preheader for TC checks. 7795 // Note that new preheader block is generated for vector loop. 7796 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7797 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7798 7799 // Generate code to check if the loop's trip count is less than VF * UF of the 7800 // main vector loop. 7801 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7802 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7803 7804 Value *CheckMinIters = Builder.CreateICmp( 7805 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7806 "min.iters.check"); 7807 7808 if (!ForEpilogue) 7809 TCCheckBlock->setName("vector.main.loop.iter.check"); 7810 7811 // Create new preheader for vector loop. 7812 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7813 DT, LI, nullptr, "vector.ph"); 7814 7815 if (ForEpilogue) { 7816 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7817 DT->getNode(Bypass)->getIDom()) && 7818 "TC check is expected to dominate Bypass"); 7819 7820 // Update dominator for Bypass & LoopExit. 7821 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7822 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7823 // For loops with multiple exits, there's no edge from the middle block 7824 // to exit blocks (as the epilogue must run) and thus no need to update 7825 // the immediate dominator of the exit blocks. 7826 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7827 7828 LoopBypassBlocks.push_back(TCCheckBlock); 7829 7830 // Save the trip count so we don't have to regenerate it in the 7831 // vec.epilog.iter.check. This is safe to do because the trip count 7832 // generated here dominates the vector epilog iter check. 7833 EPI.TripCount = Count; 7834 } 7835 7836 ReplaceInstWithInst( 7837 TCCheckBlock->getTerminator(), 7838 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7839 7840 return TCCheckBlock; 7841 } 7842 7843 //===--------------------------------------------------------------------===// 7844 // EpilogueVectorizerEpilogueLoop 7845 //===--------------------------------------------------------------------===// 7846 7847 /// This function is partially responsible for generating the control flow 7848 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 
7849 std::pair<BasicBlock *, Value *> 7850 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7851 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7852 createVectorLoopSkeleton("vec.epilog."); 7853 7854 // Now, compare the remaining count and if there aren't enough iterations to 7855 // execute the vectorized epilogue skip to the scalar part. 7856 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7857 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7858 LoopVectorPreHeader = 7859 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7860 LI, nullptr, "vec.epilog.ph"); 7861 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7862 VecEpilogueIterationCountCheck); 7863 7864 // Adjust the control flow taking the state info from the main loop 7865 // vectorization into account. 7866 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7867 "expected this to be saved from the previous pass."); 7868 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7869 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7870 7871 DT->changeImmediateDominator(LoopVectorPreHeader, 7872 EPI.MainLoopIterationCountCheck); 7873 7874 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7875 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7876 7877 if (EPI.SCEVSafetyCheck) 7878 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7879 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7880 if (EPI.MemSafetyCheck) 7881 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7882 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7883 7884 DT->changeImmediateDominator( 7885 VecEpilogueIterationCountCheck, 7886 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7887 7888 DT->changeImmediateDominator(LoopScalarPreHeader, 7889 EPI.EpilogueIterationCountCheck); 7890 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7891 // If there is an epilogue which must run, there's no edge from the 7892 // middle block to exit blocks and thus no need to update the immediate 7893 // dominator of the exit blocks. 7894 DT->changeImmediateDominator(LoopExitBlock, 7895 EPI.EpilogueIterationCountCheck); 7896 7897 // Keep track of bypass blocks, as they feed start values to the induction 7898 // phis in the scalar loop preheader. 7899 if (EPI.SCEVSafetyCheck) 7900 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7901 if (EPI.MemSafetyCheck) 7902 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7903 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7904 7905 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7906 // merge control-flow from the latch block and the middle block. Update the 7907 // incoming values here and move the Phi into the preheader. 
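  // Illustrative example (not from the original source): a reduction computed
  // by the main vector loop may reach this block as a phi merging the value
  // produced in the middle block with the init value from a bypass path, e.g.
  //   %bc.merge.rdx = phi i32 [ 0, ... ], [ %rdx.main, %middle.block ]
  // The loop below fixes up its incoming blocks and values and moves it into
  // vec.epilog.ph, where the epilogue vector loop can pick it up as a start
  // value.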
  SmallVector<PHINode *, 4> PhisInBlock;
  for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
    PhisInBlock.push_back(&Phi);

  for (PHINode *Phi : PhisInBlock) {
    Phi->replaceIncomingBlockWith(
        VecEpilogueIterationCountCheck->getSinglePredecessor(),
        VecEpilogueIterationCountCheck);
    Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
    if (EPI.SCEVSafetyCheck)
      Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
    if (EPI.MemSafetyCheck)
      Phi->removeIncomingValue(EPI.MemSafetyCheck);
    Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
  }

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues({VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7963 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7964 7965 Value *CheckMinIters = 7966 Builder.CreateICmp(P, Count, 7967 createStepForVF(Builder, Count->getType(), 7968 EPI.EpilogueVF, EPI.EpilogueUF), 7969 "min.epilog.iters.check"); 7970 7971 ReplaceInstWithInst( 7972 Insert->getTerminator(), 7973 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7974 7975 LoopBypassBlocks.push_back(Insert); 7976 return Insert; 7977 } 7978 7979 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 7980 LLVM_DEBUG({ 7981 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 7982 << "Epilogue Loop VF:" << EPI.EpilogueVF 7983 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7984 }); 7985 } 7986 7987 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 7988 DEBUG_WITH_TYPE(VerboseDebug, { 7989 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 7990 }); 7991 } 7992 7993 bool LoopVectorizationPlanner::getDecisionAndClampRange( 7994 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 7995 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 7996 bool PredicateAtRangeStart = Predicate(Range.Start); 7997 7998 for (ElementCount TmpVF = Range.Start * 2; 7999 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8000 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8001 Range.End = TmpVF; 8002 break; 8003 } 8004 8005 return PredicateAtRangeStart; 8006 } 8007 8008 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8009 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8010 /// of VF's starting at a given VF and extending it as much as possible. Each 8011 /// vectorization decision can potentially shorten this sub-range during 8012 /// buildVPlan(). 8013 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8014 ElementCount MaxVF) { 8015 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8016 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8017 VFRange SubRange = {VF, MaxVFPlusOne}; 8018 VPlans.push_back(buildVPlan(SubRange)); 8019 VF = SubRange.End; 8020 } 8021 } 8022 8023 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8024 VPlanPtr &Plan) { 8025 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8026 8027 // Look for cached value. 8028 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8029 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8030 if (ECEntryIt != EdgeMaskCache.end()) 8031 return ECEntryIt->second; 8032 8033 VPValue *SrcMask = createBlockInMask(Src, Plan); 8034 8035 // The terminator has to be a branch inst! 8036 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8037 assert(BI && "Unexpected terminator found"); 8038 8039 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8040 return EdgeMaskCache[Edge] = SrcMask; 8041 8042 // If source is an exiting block, we know the exit edge is dynamically dead 8043 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8044 // adding uses of an otherwise potentially dead instruction. 
8045 if (OrigLoop->isLoopExiting(Src)) 8046 return EdgeMaskCache[Edge] = SrcMask; 8047 8048 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8049 assert(EdgeMask && "No Edge Mask found for condition"); 8050 8051 if (BI->getSuccessor(0) != Dst) 8052 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8053 8054 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8055 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8056 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8057 // The select version does not introduce new UB if SrcMask is false and 8058 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8059 VPValue *False = Plan->getOrAddVPValue( 8060 ConstantInt::getFalse(BI->getCondition()->getType())); 8061 EdgeMask = 8062 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8063 } 8064 8065 return EdgeMaskCache[Edge] = EdgeMask; 8066 } 8067 8068 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8069 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8070 8071 // Look for cached value. 8072 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8073 if (BCEntryIt != BlockMaskCache.end()) 8074 return BCEntryIt->second; 8075 8076 // All-one mask is modelled as no-mask following the convention for masked 8077 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8078 VPValue *BlockMask = nullptr; 8079 8080 if (OrigLoop->getHeader() == BB) { 8081 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8082 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8083 8084 // Introduce the early-exit compare IV <= BTC to form header block mask. 8085 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8086 // constructing the desired canonical IV in the header block as its first 8087 // non-phi instructions. 8088 assert(CM.foldTailByMasking() && "must fold the tail"); 8089 VPBasicBlock *HeaderVPBB = 8090 Plan->getVectorLoopRegion()->getEntryBasicBlock(); 8091 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8092 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8093 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8094 8095 VPBuilder::InsertPointGuard Guard(Builder); 8096 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8097 if (CM.TTI.emitGetActiveLaneMask()) { 8098 VPValue *TC = Plan->getOrCreateTripCount(); 8099 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8100 } else { 8101 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8102 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8103 } 8104 return BlockMaskCache[BB] = BlockMask; 8105 } 8106 8107 // This is the block mask. We OR all incoming edges. 8108 for (auto *Predecessor : predecessors(BB)) { 8109 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8110 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8111 return BlockMaskCache[BB] = EdgeMask; 8112 8113 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
  }

  return BlockMaskCache[BB] = BlockMask;
}

VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
                                                ArrayRef<VPValue *> Operands,
                                                VFRange &Range,
                                                VPlanPtr &Plan) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Must be called with either a load or store");

  auto willWiden = [&](ElementCount VF) -> bool {
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    if (Decision == LoopVectorizationCostModel::CM_Interleave)
      return true;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  LoopVectorizationCostModel::InstWidening Decision =
      CM.getWideningDecision(I, Range.Start);
  bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
  bool Consecutive =
      Reverse || Decision == LoopVectorizationCostModel::CM_Widen;

  if (LoadInst *Load = dyn_cast<LoadInst>(I))
    return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
                                              Consecutive, Reverse);

  StoreInst *Store = cast<StoreInst>(I);
  return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
                                            Mask, Consecutive, Reverse);
}

/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
/// insert a recipe to expand the step for the induction recipe.
static VPWidenIntOrFpInductionRecipe *createWidenInductionRecipes(
    PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start,
    const InductionDescriptor &IndDesc, LoopVectorizationCostModel &CM,
    VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range) {
  // Returns true if an instruction \p I should be scalarized instead of
  // vectorized for the chosen vectorization factor.
  auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
    return CM.isScalarAfterVectorization(I, VF) ||
           CM.isProfitableToScalarize(I, VF);
  };

  bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) {
        // Returns true if we should generate a scalar version of \p IV.
8184 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8185 return true; 8186 auto isScalarInst = [&](User *U) -> bool { 8187 auto *I = cast<Instruction>(U); 8188 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8189 }; 8190 return any_of(PhiOrTrunc->users(), isScalarInst); 8191 }, 8192 Range); 8193 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8194 [&](ElementCount VF) { 8195 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8196 }, 8197 Range); 8198 assert(IndDesc.getStartValue() == 8199 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8200 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8201 "step must be loop invariant"); 8202 8203 VPValue *Step = 8204 vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep(), SE); 8205 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8206 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, TruncI, 8207 NeedsScalarIV, !NeedsScalarIVOnly); 8208 } 8209 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8210 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, 8211 NeedsScalarIV, !NeedsScalarIVOnly); 8212 } 8213 8214 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8215 PHINode *Phi, ArrayRef<VPValue *> Operands, VPlan &Plan, VFRange &Range) { 8216 8217 // Check if this is an integer or fp induction. If so, build the recipe that 8218 // produces its scalar and vector values. 8219 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8220 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, CM, Plan, 8221 *PSE.getSE(), *OrigLoop, Range); 8222 8223 // Check if this is pointer induction. If so, build the recipe for it. 8224 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8225 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8226 *PSE.getSE()); 8227 return nullptr; 8228 } 8229 8230 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8231 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, VPlan &Plan) { 8232 // Optimize the special case where the source is a constant integer 8233 // induction variable. Notice that we can only optimize the 'trunc' case 8234 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8235 // (c) other casts depend on pointer size. 8236 8237 // Determine whether \p K is a truncation based on an induction variable that 8238 // can be optimized. 8239 auto isOptimizableIVTruncate = 8240 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8241 return [=](ElementCount VF) -> bool { 8242 return CM.isOptimizableIVTruncate(K, VF); 8243 }; 8244 }; 8245 8246 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8247 isOptimizableIVTruncate(I), Range)) { 8248 8249 auto *Phi = cast<PHINode>(I->getOperand(0)); 8250 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8251 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8252 return createWidenInductionRecipes(Phi, I, Start, II, CM, Plan, 8253 *PSE.getSE(), *OrigLoop, Range); 8254 } 8255 return nullptr; 8256 } 8257 8258 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8259 ArrayRef<VPValue *> Operands, 8260 VPlanPtr &Plan) { 8261 // If all incoming values are equal, the incoming VPValue can be used directly 8262 // instead of creating a new VPBlendRecipe. 
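  // Illustrative example (not from the original source): for
  //   %p = phi i32 [ %x, %bb1 ], [ %x, %bb2 ]
  // both operands map to the same VPValue, so no blend is needed; a phi such
  // as
  //   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  // instead becomes a VPBlendRecipe that selects between %a and %b under the
  // masks of the (%bb1 -> block) and (%bb2 -> block) edges.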
8263 VPValue *FirstIncoming = Operands[0]; 8264 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8265 return FirstIncoming == Inc; 8266 })) { 8267 return Operands[0]; 8268 } 8269 8270 unsigned NumIncoming = Phi->getNumIncomingValues(); 8271 // For in-loop reductions, we do not need to create an additional select. 8272 VPValue *InLoopVal = nullptr; 8273 for (unsigned In = 0; In < NumIncoming; In++) { 8274 PHINode *PhiOp = 8275 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8276 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8277 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8278 InLoopVal = Operands[In]; 8279 } 8280 } 8281 8282 assert((!InLoopVal || NumIncoming == 2) && 8283 "Found an in-loop reduction for PHI with unexpected number of " 8284 "incoming values"); 8285 if (InLoopVal) 8286 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8287 8288 // We know that all PHIs in non-header blocks are converted into selects, so 8289 // we don't have to worry about the insertion order and we can just use the 8290 // builder. At this point we generate the predication tree. There may be 8291 // duplications since this is a simple recursive scan, but future 8292 // optimizations will clean it up. 8293 SmallVector<VPValue *, 2> OperandsWithMask; 8294 8295 for (unsigned In = 0; In < NumIncoming; In++) { 8296 VPValue *EdgeMask = 8297 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8298 assert((EdgeMask || NumIncoming == 1) && 8299 "Multiple predecessors with one having a full mask"); 8300 OperandsWithMask.push_back(Operands[In]); 8301 if (EdgeMask) 8302 OperandsWithMask.push_back(EdgeMask); 8303 } 8304 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8305 } 8306 8307 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8308 ArrayRef<VPValue *> Operands, 8309 VFRange &Range) const { 8310 8311 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8312 [this, CI](ElementCount VF) { 8313 return CM.isScalarWithPredication(CI, VF); 8314 }, 8315 Range); 8316 8317 if (IsPredicated) 8318 return nullptr; 8319 8320 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8321 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8322 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8323 ID == Intrinsic::pseudoprobe || 8324 ID == Intrinsic::experimental_noalias_scope_decl)) 8325 return nullptr; 8326 8327 auto willWiden = [&](ElementCount VF) -> bool { 8328 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8329 // The following case may be scalarized depending on the VF. 8330 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8331 // version of the instruction. 8332 // Is it beneficial to perform intrinsic call compared to lib call? 8333 bool NeedToScalarize = false; 8334 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8335 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8336 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8337 return UseVectorIntrinsic || !NeedToScalarize; 8338 }; 8339 8340 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8341 return nullptr; 8342 8343 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8344 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8345 } 8346 8347 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8348 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8349 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8350 // Instruction should be widened, unless it is scalar after vectorization, 8351 // scalarization is profitable or it is predicated. 8352 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8353 return CM.isScalarAfterVectorization(I, VF) || 8354 CM.isProfitableToScalarize(I, VF) || 8355 CM.isScalarWithPredication(I, VF); 8356 }; 8357 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8358 Range); 8359 } 8360 8361 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8362 ArrayRef<VPValue *> Operands) const { 8363 auto IsVectorizableOpcode = [](unsigned Opcode) { 8364 switch (Opcode) { 8365 case Instruction::Add: 8366 case Instruction::And: 8367 case Instruction::AShr: 8368 case Instruction::BitCast: 8369 case Instruction::FAdd: 8370 case Instruction::FCmp: 8371 case Instruction::FDiv: 8372 case Instruction::FMul: 8373 case Instruction::FNeg: 8374 case Instruction::FPExt: 8375 case Instruction::FPToSI: 8376 case Instruction::FPToUI: 8377 case Instruction::FPTrunc: 8378 case Instruction::FRem: 8379 case Instruction::FSub: 8380 case Instruction::ICmp: 8381 case Instruction::IntToPtr: 8382 case Instruction::LShr: 8383 case Instruction::Mul: 8384 case Instruction::Or: 8385 case Instruction::PtrToInt: 8386 case Instruction::SDiv: 8387 case Instruction::Select: 8388 case Instruction::SExt: 8389 case Instruction::Shl: 8390 case Instruction::SIToFP: 8391 case Instruction::SRem: 8392 case Instruction::Sub: 8393 case Instruction::Trunc: 8394 case Instruction::UDiv: 8395 case Instruction::UIToFP: 8396 case Instruction::URem: 8397 case Instruction::Xor: 8398 case Instruction::ZExt: 8399 case Instruction::Freeze: 8400 return true; 8401 } 8402 return false; 8403 }; 8404 8405 if (!IsVectorizableOpcode(I->getOpcode())) 8406 return nullptr; 8407 8408 // Success: widen this instruction. 
  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
}

void VPRecipeBuilder::fixHeaderPhis() {
  BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  for (VPHeaderPHIRecipe *R : PhisToFix) {
    auto *PN = cast<PHINode>(R->getUnderlyingValue());
    VPRecipeBase *IncR =
        getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
    R->addOperand(IncR->getVPSingleValue());
  }
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
      Range);

  // Even if the instruction is not marked as uniform, there are certain
  // intrinsic calls that can be effectively treated as such, so we check for
  // them here. Conservatively, we only do this for scalable vectors, since
  // for fixed-width VFs we can always fall back on full scalarization.
  if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
    switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
    case Intrinsic::assume:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // For scalable vectors if one of the operands is variant then we still
      // want to mark as uniform, which will generate one instruction for just
      // the first lane of the vector. We can't scalarize the call in the same
      // way as for fixed-width vectors because we don't know how many lanes
      // there are.
      //
      // The reasons for doing it this way for scalable vectors are:
      //  1. For the assume intrinsic generating the instruction for the first
      //     lane is still better than not generating any at all. For
      //     example, the input may be a splat across all lanes.
      //  2. For the lifetime start/end intrinsics the pointer operand only
      //     does anything useful when the input comes from a stack object,
      //     which suggests it should always be uniform. For non-stack objects
      //     the effect is to poison the object, which still allows us to
      //     remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
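  // (For the predicated path below, createReplicateRegion wraps Recipe in a
  // triangular if-then region with blocks named pred.<opcode>.entry, .if and
  // .continue; Recipe itself is placed in the ".if" block.)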
8484 if (!IsPredicated) { 8485 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8486 VPBB->appendRecipe(Recipe); 8487 return VPBB; 8488 } 8489 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8490 8491 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8492 assert(SingleSucc && "VPBB must have a single successor when handling " 8493 "predicated replication."); 8494 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8495 // Record predicated instructions for above packing optimizations. 8496 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8497 VPBlockUtils::insertBlockAfter(Region, VPBB); 8498 auto *RegSucc = new VPBasicBlock(); 8499 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8500 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8501 return RegSucc; 8502 } 8503 8504 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8505 VPRecipeBase *PredRecipe, 8506 VPlanPtr &Plan) { 8507 // Instructions marked for predication are replicated and placed under an 8508 // if-then construct to prevent side-effects. 8509 8510 // Generate recipes to compute the block mask for this region. 8511 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8512 8513 // Build the triangular if-then region. 8514 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8515 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8516 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8517 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8518 auto *PHIRecipe = Instr->getType()->isVoidTy() 8519 ? nullptr 8520 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8521 if (PHIRecipe) { 8522 Plan->removeVPValueFor(Instr); 8523 Plan->addVPValue(Instr, PHIRecipe); 8524 } 8525 auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8526 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8527 VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true); 8528 8529 // Note: first set Entry as region entry and then connect successors starting 8530 // from it in order, to propagate the "parent" of each VPBasicBlock. 8531 VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, BlockInMask, Entry); 8532 VPBlockUtils::connectBlocks(Pred, Exiting); 8533 8534 return Region; 8535 } 8536 8537 VPRecipeOrVPValueTy 8538 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8539 ArrayRef<VPValue *> Operands, 8540 VFRange &Range, VPlanPtr &Plan) { 8541 // First, check for specific widening recipes that deal with inductions, Phi 8542 // nodes, calls and memory operations. 
8543 VPRecipeBase *Recipe; 8544 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8545 if (Phi->getParent() != OrigLoop->getHeader()) 8546 return tryToBlend(Phi, Operands, Plan); 8547 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range))) 8548 return toVPRecipeResult(Recipe); 8549 8550 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8551 assert((Legal->isReductionVariable(Phi) || 8552 Legal->isFirstOrderRecurrence(Phi)) && 8553 "can only widen reductions and first-order recurrences here"); 8554 VPValue *StartV = Operands[0]; 8555 if (Legal->isReductionVariable(Phi)) { 8556 const RecurrenceDescriptor &RdxDesc = 8557 Legal->getReductionVars().find(Phi)->second; 8558 assert(RdxDesc.getRecurrenceStartValue() == 8559 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8560 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8561 CM.isInLoopReduction(Phi), 8562 CM.useOrderedReductions(RdxDesc)); 8563 } else { 8564 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8565 } 8566 8567 // Record the incoming value from the backedge, so we can add the incoming 8568 // value from the backedge after all recipes have been created. 8569 recordRecipeOf(cast<Instruction>( 8570 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8571 PhisToFix.push_back(PhiRecipe); 8572 return toVPRecipeResult(PhiRecipe); 8573 } 8574 8575 if (isa<TruncInst>(Instr) && 8576 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8577 Range, *Plan))) 8578 return toVPRecipeResult(Recipe); 8579 8580 // All widen recipes below deal only with VF > 1. 8581 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8582 [&](ElementCount VF) { return VF.isScalar(); }, Range)) 8583 return nullptr; 8584 8585 if (auto *CI = dyn_cast<CallInst>(Instr)) 8586 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8587 8588 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8589 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8590 8591 if (!shouldWiden(Instr, Range)) 8592 return nullptr; 8593 8594 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8595 return toVPRecipeResult(new VPWidenGEPRecipe( 8596 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8597 8598 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8599 bool InvariantCond = 8600 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8601 return toVPRecipeResult(new VPWidenSelectRecipe( 8602 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8603 } 8604 8605 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8606 } 8607 8608 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8609 ElementCount MaxVF) { 8610 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8611 8612 // Collect instructions from the original loop that will become trivially dead 8613 // in the vectorized loop. We don't need to vectorize these instructions. For 8614 // example, original induction update instructions can become dead because we 8615 // separately emit induction "steps" when generating code for the new loop. 8616 // Similarly, we create a new latch condition when setting up the structure 8617 // of the new loop, so the old one can become dead. 8618 SmallPtrSet<Instruction *, 4> DeadInstructions; 8619 collectTriviallyDeadInstructions(DeadInstructions); 8620 8621 // Add assume instructions we need to drop to DeadInstructions, to prevent 8622 // them from being added to the VPlan. 
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
  for (auto &P : Legal->getSinkAfter()) {
    Instruction *SinkTarget = P.second;
    Instruction *FirstInst = &*SinkTarget->getParent()->begin();
    (void)FirstInst;
    while (DeadInstructions.contains(SinkTarget)) {
      assert(
          SinkTarget != FirstInst &&
          "Must find a live instruction (at least the one feeding the "
          "first-order recurrence PHI) before reaching beginning of the block");
      SinkTarget = SinkTarget->getPrevNode();
      assert(SinkTarget != P.first &&
             "sink source equals target, no sinking required");
    }
    P.second = SinkTarget;
  }

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

// Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
// CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
// BranchOnCount VPInstruction to the latch.
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
                                  bool HasNUW, bool IsVPlanNative) {
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  auto *StartV = Plan.getOrAddVPValue(StartIdx);

  auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
  Header->insert(CanonicalIVPHI, Header->begin());

  auto *CanonicalIVIncrement =
      new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
                               : VPInstruction::CanonicalIVIncrement,
                        {CanonicalIVPHI}, DL);
  CanonicalIVPHI->addOperand(CanonicalIVIncrement);

  VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  if (IsVPlanNative)
    EB->setCondBit(nullptr);
  EB->appendRecipe(CanonicalIVIncrement);

  auto *BranchOnCount =
      new VPInstruction(VPInstruction::BranchOnCount,
                        {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
  EB->appendRecipe(BranchOnCount);
}

// Add exit values to \p Plan. VPLiveOuts are added for each LCSSA phi in the
// original exit block.
static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB,
                                VPBasicBlock *MiddleVPBB, Loop *OrigLoop,
                                VPlan &Plan) {
  BasicBlock *ExitBB = OrigLoop->getUniqueExitBlock();
  BasicBlock *ExitingBB = OrigLoop->getExitingBlock();
  // Only handle single-exit loops with unique exit blocks for now.
  if (!ExitBB || !ExitBB->getSinglePredecessor() || !ExitingBB)
    return;

  // Introduce VPUsers modeling the exit values.
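  // E.g. (illustrative) an LCSSA phi in the exit block such as
  //   %res.lcssa = phi i32 [ %res, %loop.exiting ]
  // becomes a VPLiveOut tracking the VPValue created for %res.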
8702 for (PHINode &ExitPhi : ExitBB->phis()) { 8703 Value *IncomingValue = 8704 ExitPhi.getIncomingValueForBlock(ExitingBB); 8705 VPValue *V = Plan.getOrAddVPValue(IncomingValue, true); 8706 Plan.addLiveOut(&ExitPhi, V); 8707 } 8708 } 8709 8710 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8711 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8712 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8713 8714 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8715 8716 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8717 8718 // --------------------------------------------------------------------------- 8719 // Pre-construction: record ingredients whose recipes we'll need to further 8720 // process after constructing the initial VPlan. 8721 // --------------------------------------------------------------------------- 8722 8723 // Mark instructions we'll need to sink later and their targets as 8724 // ingredients whose recipe we'll need to record. 8725 for (auto &Entry : SinkAfter) { 8726 RecipeBuilder.recordRecipeOf(Entry.first); 8727 RecipeBuilder.recordRecipeOf(Entry.second); 8728 } 8729 for (auto &Reduction : CM.getInLoopReductionChains()) { 8730 PHINode *Phi = Reduction.first; 8731 RecurKind Kind = 8732 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8733 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8734 8735 RecipeBuilder.recordRecipeOf(Phi); 8736 for (auto &R : ReductionOperations) { 8737 RecipeBuilder.recordRecipeOf(R); 8738 // For min/max reductions, where we have a pair of icmp/select, we also 8739 // need to record the ICmp recipe, so it can be removed later. 8740 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8741 "Only min/max recurrences allowed for inloop reductions"); 8742 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8743 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8744 } 8745 } 8746 8747 // For each interleave group which is relevant for this (possibly trimmed) 8748 // Range, add it to the set of groups to be later applied to the VPlan and add 8749 // placeholders for its members' Recipes which we'll be replacing with a 8750 // single VPInterleaveRecipe. 8751 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8752 auto applyIG = [IG, this](ElementCount VF) -> bool { 8753 return (VF.isVector() && // Query is illegal for VF == 1 8754 CM.getWideningDecision(IG->getInsertPos(), VF) == 8755 LoopVectorizationCostModel::CM_Interleave); 8756 }; 8757 if (!getDecisionAndClampRange(applyIG, Range)) 8758 continue; 8759 InterleaveGroups.insert(IG); 8760 for (unsigned i = 0; i < IG->getFactor(); i++) 8761 if (Instruction *Member = IG->getMember(i)) 8762 RecipeBuilder.recordRecipeOf(Member); 8763 }; 8764 8765 // --------------------------------------------------------------------------- 8766 // Build initial VPlan: Scan the body of the loop in a topological order to 8767 // visit each basic block after having visited its predecessor basic blocks. 8768 // --------------------------------------------------------------------------- 8769 8770 // Create initial VPlan skeleton, starting with a block for the pre-header, 8771 // followed by a region for the vector loop, followed by the middle block. The 8772 // skeleton vector loop region contains a header and latch block. 
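  // The initial skeleton thus looks like (illustrative):
  //   vector.ph -> ( vector.body -> vector.latch ) -> middle.block
  // where the blocks in parentheses form the "vector loop" region.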
8773 VPBasicBlock *Preheader = new VPBasicBlock("vector.ph"); 8774 auto Plan = std::make_unique<VPlan>(Preheader); 8775 8776 VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body"); 8777 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch"); 8778 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB); 8779 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop"); 8780 VPBlockUtils::insertBlockAfter(TopRegion, Preheader); 8781 VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block"); 8782 VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion); 8783 8784 Instruction *DLInst = 8785 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 8786 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), 8787 DLInst ? DLInst->getDebugLoc() : DebugLoc(), 8788 !CM.foldTailByMasking(), false); 8789 8790 // Scan the body of the loop in a topological order to visit each basic block 8791 // after having visited its predecessor basic blocks. 8792 LoopBlocksDFS DFS(OrigLoop); 8793 DFS.perform(LI); 8794 8795 VPBasicBlock *VPBB = HeaderVPBB; 8796 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 8797 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8798 // Relevant instructions from basic block BB will be grouped into VPRecipe 8799 // ingredients and fill a new VPBasicBlock. 8800 unsigned VPBBsForBB = 0; 8801 if (VPBB != HeaderVPBB) 8802 VPBB->setName(BB->getName()); 8803 Builder.setInsertPoint(VPBB); 8804 8805 // Introduce each ingredient into VPlan. 8806 // TODO: Model and preserve debug intrinsics in VPlan. 8807 for (Instruction &I : BB->instructionsWithoutDebug()) { 8808 Instruction *Instr = &I; 8809 8810 // First filter out irrelevant instructions, to ensure no recipes are 8811 // built for them. 8812 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8813 continue; 8814 8815 SmallVector<VPValue *, 4> Operands; 8816 auto *Phi = dyn_cast<PHINode>(Instr); 8817 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8818 Operands.push_back(Plan->getOrAddVPValue( 8819 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8820 } else { 8821 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8822 Operands = {OpRange.begin(), OpRange.end()}; 8823 } 8824 8825 // Invariant stores inside loop will be deleted and a single store 8826 // with the final reduction value will be added to the exit block 8827 StoreInst *SI; 8828 if ((SI = dyn_cast<StoreInst>(&I)) && 8829 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) 8830 continue; 8831 8832 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8833 Instr, Operands, Range, Plan)) { 8834 // If Instr can be simplified to an existing VPValue, use it. 8835 if (RecipeOrValue.is<VPValue *>()) { 8836 auto *VPV = RecipeOrValue.get<VPValue *>(); 8837 Plan->addVPValue(Instr, VPV); 8838 // If the re-used value is a recipe, register the recipe for the 8839 // instruction, in case the recipe for Instr needs to be recorded. 8840 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8841 RecipeBuilder.setRecipe(Instr, R); 8842 continue; 8843 } 8844 // Otherwise, add the new recipe. 
8845 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8846 for (auto *Def : Recipe->definedValues()) { 8847 auto *UV = Def->getUnderlyingValue(); 8848 Plan->addVPValue(UV, Def); 8849 } 8850 8851 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8852 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8853 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8854 // of the header block. That can happen for truncates of induction 8855 // variables. Those recipes are moved to the phi section of the header 8856 // block after applying SinkAfter, which relies on the original 8857 // position of the trunc. 8858 assert(isa<TruncInst>(Instr)); 8859 InductionsToMove.push_back( 8860 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8861 } 8862 RecipeBuilder.setRecipe(Instr, Recipe); 8863 VPBB->appendRecipe(Recipe); 8864 continue; 8865 } 8866 8867 // Otherwise, if all widening options failed, Instruction is to be 8868 // replicated. This may create a successor for VPBB. 8869 VPBasicBlock *NextVPBB = 8870 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8871 if (NextVPBB != VPBB) { 8872 VPBB = NextVPBB; 8873 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8874 : ""); 8875 } 8876 } 8877 8878 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8879 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8880 } 8881 8882 HeaderVPBB->setName("vector.body"); 8883 8884 // Fold the last, empty block into its predecessor. 8885 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8886 assert(VPBB && "expected to fold last (empty) block"); 8887 // After here, VPBB should not be used. 8888 VPBB = nullptr; 8889 8890 addUsersInExitBlock(HeaderVPBB, MiddleVPBB, OrigLoop, *Plan); 8891 8892 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8893 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8894 "entry block must be set to a VPRegionBlock having a non-empty entry " 8895 "VPBasicBlock"); 8896 RecipeBuilder.fixHeaderPhis(); 8897 8898 // --------------------------------------------------------------------------- 8899 // Transform initial VPlan: Apply previously taken decisions, in order, to 8900 // bring the VPlan to its final state. 8901 // --------------------------------------------------------------------------- 8902 8903 // Apply Sink-After legal constraints. 8904 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8905 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8906 if (Region && Region->isReplicator()) { 8907 assert(Region->getNumSuccessors() == 1 && 8908 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8909 assert(R->getParent()->size() == 1 && 8910 "A recipe in an original replicator region must be the only " 8911 "recipe in its block"); 8912 return Region; 8913 } 8914 return nullptr; 8915 }; 8916 for (auto &Entry : SinkAfter) { 8917 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8918 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8919 8920 auto *TargetRegion = GetReplicateRegion(Target); 8921 auto *SinkRegion = GetReplicateRegion(Sink); 8922 if (!SinkRegion) { 8923 // If the sink source is not a replicate region, sink the recipe directly. 8924 if (TargetRegion) { 8925 // The target is in a replication region, make sure to move Sink to 8926 // the block after it, not into the replication region itself. 
8927 VPBasicBlock *NextBlock = 8928 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8929 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8930 } else 8931 Sink->moveAfter(Target); 8932 continue; 8933 } 8934 8935 // The sink source is in a replicate region. Unhook the region from the CFG. 8936 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8937 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8938 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8939 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8940 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8941 8942 if (TargetRegion) { 8943 // The target recipe is also in a replicate region, move the sink region 8944 // after the target region. 8945 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8946 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8947 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8948 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8949 } else { 8950 // The sink source is in a replicate region, we need to move the whole 8951 // replicate region, which should only contain a single recipe in the 8952 // main block. 8953 auto *SplitBlock = 8954 Target->getParent()->splitAt(std::next(Target->getIterator())); 8955 8956 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8957 8958 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8959 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8960 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8961 } 8962 } 8963 8964 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8965 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8966 8967 // Now that sink-after is done, move induction recipes for optimized truncates 8968 // to the phi section of the header block. 8969 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8970 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8971 8972 // Adjust the recipes for any inloop reductions. 8973 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExiting()), Plan, 8974 RecipeBuilder, Range.Start); 8975 8976 // Introduce a recipe to combine the incoming and previous values of a 8977 // first-order recurrence. 8978 for (VPRecipeBase &R : 8979 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8980 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8981 if (!RecurPhi) 8982 continue; 8983 8984 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8985 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8986 auto *Region = GetReplicateRegion(PrevRecipe); 8987 if (Region) 8988 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 8989 if (Region || PrevRecipe->isPhi()) 8990 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8991 else 8992 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8993 8994 auto *RecurSplice = cast<VPInstruction>( 8995 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8996 {RecurPhi, RecurPhi->getBackedgeValue()})); 8997 8998 RecurPhi->replaceAllUsesWith(RecurSplice); 8999 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9000 // all users. 9001 RecurSplice->setOperand(0, RecurPhi); 9002 } 9003 9004 // Interleave memory: for each Interleave Group we marked earlier as relevant 9005 // for this VPlan, replace the Recipes widening its memory instructions with a 9006 // single VPInterleaveRecipe at its insertion point. 
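  // E.g. (illustrative) for an interleave group of two loads with factor 2,
  //   %a = load i32, ptr %gep.0   ; member 0 (insertion point)
  //   %b = load i32, ptr %gep.1   ; member 1
  // both widened-load recipes are replaced by one VPInterleaveRecipe, which
  // later emits a single wide load plus shuffles to extract the members.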
9007 for (auto IG : InterleaveGroups) { 9008 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9009 RecipeBuilder.getRecipe(IG->getInsertPos())); 9010 SmallVector<VPValue *, 4> StoredValues; 9011 for (unsigned i = 0; i < IG->getFactor(); ++i) 9012 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9013 auto *StoreR = 9014 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9015 StoredValues.push_back(StoreR->getStoredValue()); 9016 } 9017 9018 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9019 Recipe->getMask()); 9020 VPIG->insertBefore(Recipe); 9021 unsigned J = 0; 9022 for (unsigned i = 0; i < IG->getFactor(); ++i) 9023 if (Instruction *Member = IG->getMember(i)) { 9024 if (!Member->getType()->isVoidTy()) { 9025 VPValue *OriginalV = Plan->getVPValue(Member); 9026 Plan->removeVPValueFor(Member); 9027 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9028 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9029 J++; 9030 } 9031 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9032 } 9033 } 9034 9035 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9036 // in ways that accessing values using original IR values is incorrect. 9037 Plan->disableValue2VPValue(); 9038 9039 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9040 VPlanTransforms::sinkScalarOperands(*Plan); 9041 VPlanTransforms::mergeReplicateRegions(*Plan); 9042 VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop); 9043 VPlanTransforms::removeRedundantExpandSCEVRecipes(*Plan); 9044 9045 std::string PlanName; 9046 raw_string_ostream RSO(PlanName); 9047 ElementCount VF = Range.Start; 9048 Plan->addVF(VF); 9049 RSO << "Initial VPlan for VF={" << VF; 9050 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9051 Plan->addVF(VF); 9052 RSO << "," << VF; 9053 } 9054 RSO << "},UF>=1"; 9055 RSO.flush(); 9056 Plan->setName(PlanName); 9057 9058 // Fold Exit block into its predecessor if possible. 9059 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9060 // VPBasicBlock as exit. 9061 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExiting()); 9062 9063 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9064 return Plan; 9065 } 9066 9067 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9068 // Outer loop handling: They may require CFG and instruction level 9069 // transformations before even evaluating whether vectorization is profitable. 9070 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9071 // the vectorization pipeline. 9072 assert(!OrigLoop->isInnermost()); 9073 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9074 9075 // Create new empty VPlan 9076 auto Plan = std::make_unique<VPlan>(); 9077 9078 // Build hierarchical CFG 9079 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9080 HCFGBuilder.buildHierarchicalCFG(); 9081 9082 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9083 VF *= 2) 9084 Plan->addVF(VF); 9085 9086 SmallPtrSet<Instruction *, 1> DeadInstructions; 9087 VPlanTransforms::VPInstructionsToVPRecipes( 9088 OrigLoop, Plan, 9089 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, 9090 DeadInstructions, *PSE.getSE()); 9091 9092 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(), 9093 true, true); 9094 return Plan; 9095 } 9096 9097 // Adjust the recipes for reductions. 
// For in-loop reductions the chain of instructions leading from the loop exit
// instruction to the phi needs to be converted to reductions, with one operand
// being vector and the other being the scalar reduction chain. For other
// reductions, a select is introduced between the phi and live-out recipes when
// folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc =
        Legal->getReductionVars().find(Phi)->second;
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      // Recognize a call to the llvm.fmuladd intrinsic.
      bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
      assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
             "Expected instruction to be a call to the llvm.fmuladd intrinsic");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
                (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;

      if (IsFMulAdd) {
        // If the instruction is a call to the llvm.fmuladd intrinsic then we
        // need to create an fmul recipe to use as the vector operand for the
        // fadd reduction.
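        // E.g. (illustrative) %s = call float @llvm.fmuladd.f32(%a, %b, %sum)
        // is handled as an FMul VPInstruction (%a * %b) feeding the fadd
        // reduction of %sum.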
9153 VPInstruction *FMulRecipe = new VPInstruction( 9154 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9155 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9156 WidenRecipe->getParent()->insert(FMulRecipe, 9157 WidenRecipe->getIterator()); 9158 VecOp = FMulRecipe; 9159 } 9160 VPReductionRecipe *RedRecipe = 9161 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9162 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9163 Plan->removeVPValueFor(R); 9164 Plan->addVPValue(R, RedRecipe); 9165 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9166 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9167 WidenRecipe->eraseFromParent(); 9168 9169 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9170 VPRecipeBase *CompareRecipe = 9171 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9172 assert(isa<VPWidenRecipe>(CompareRecipe) && 9173 "Expected to replace a VPWidenSC"); 9174 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9175 "Expected no remaining users"); 9176 CompareRecipe->eraseFromParent(); 9177 } 9178 Chain = R; 9179 } 9180 } 9181 9182 // If tail is folded by masking, introduce selects between the phi 9183 // and the live-out instruction of each reduction, at the beginning of the 9184 // dedicated latch block. 9185 if (CM.foldTailByMasking()) { 9186 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9187 for (VPRecipeBase &R : 9188 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9189 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9190 if (!PhiR || PhiR->isInLoop()) 9191 continue; 9192 VPValue *Cond = 9193 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9194 VPValue *Red = PhiR->getBackedgeValue(); 9195 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9196 "reduction recipe must be defined before latch"); 9197 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9198 } 9199 } 9200 } 9201 9202 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9203 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9204 VPSlotTracker &SlotTracker) const { 9205 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9206 IG->getInsertPos()->printAsOperand(O, false); 9207 O << ", "; 9208 getAddr()->printAsOperand(O, SlotTracker); 9209 VPValue *Mask = getMask(); 9210 if (Mask) { 9211 O << ", "; 9212 Mask->printAsOperand(O, SlotTracker); 9213 } 9214 9215 unsigned OpIdx = 0; 9216 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9217 if (!IG->getMember(i)) 9218 continue; 9219 if (getNumStoreOperands() > 0) { 9220 O << "\n" << Indent << " store "; 9221 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9222 O << " to index " << i; 9223 } else { 9224 O << "\n" << Indent << " "; 9225 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9226 O << " = load from index " << i; 9227 } 9228 ++OpIdx; 9229 } 9230 } 9231 #endif 9232 9233 void VPWidenCallRecipe::execute(VPTransformState &State) { 9234 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9235 *this, State); 9236 } 9237 9238 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9239 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9240 State.ILV->setDebugLocFromInst(&I); 9241 9242 // The condition can be loop invariant but still defined inside the 9243 // loop. This means that we can't just use the original 'cond' value. 9244 // We have to take the 'vectorized' value and pick the first lane. 
9245 // Instcombine will make this a no-op. 9246 auto *InvarCond = 9247 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9248 9249 for (unsigned Part = 0; Part < State.UF; ++Part) { 9250 Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part); 9251 Value *Op0 = State.get(getOperand(1), Part); 9252 Value *Op1 = State.get(getOperand(2), Part); 9253 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9254 State.set(this, Sel, Part); 9255 State.ILV->addMetadata(Sel, &I); 9256 } 9257 } 9258 9259 void VPWidenRecipe::execute(VPTransformState &State) { 9260 auto &I = *cast<Instruction>(getUnderlyingValue()); 9261 auto &Builder = State.Builder; 9262 switch (I.getOpcode()) { 9263 case Instruction::Call: 9264 case Instruction::Br: 9265 case Instruction::PHI: 9266 case Instruction::GetElementPtr: 9267 case Instruction::Select: 9268 llvm_unreachable("This instruction is handled by a different recipe."); 9269 case Instruction::UDiv: 9270 case Instruction::SDiv: 9271 case Instruction::SRem: 9272 case Instruction::URem: 9273 case Instruction::Add: 9274 case Instruction::FAdd: 9275 case Instruction::Sub: 9276 case Instruction::FSub: 9277 case Instruction::FNeg: 9278 case Instruction::Mul: 9279 case Instruction::FMul: 9280 case Instruction::FDiv: 9281 case Instruction::FRem: 9282 case Instruction::Shl: 9283 case Instruction::LShr: 9284 case Instruction::AShr: 9285 case Instruction::And: 9286 case Instruction::Or: 9287 case Instruction::Xor: { 9288 // Just widen unops and binops. 9289 State.ILV->setDebugLocFromInst(&I); 9290 9291 for (unsigned Part = 0; Part < State.UF; ++Part) { 9292 SmallVector<Value *, 2> Ops; 9293 for (VPValue *VPOp : operands()) 9294 Ops.push_back(State.get(VPOp, Part)); 9295 9296 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9297 9298 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9299 VecOp->copyIRFlags(&I); 9300 9301 // If the instruction is vectorized and was in a basic block that needed 9302 // predication, we can't propagate poison-generating flags (nuw/nsw, 9303 // exact, etc.). The control flow has been linearized and the 9304 // instruction is no longer guarded by the predicate, which could make 9305 // the flag properties to no longer hold. 9306 if (State.MayGeneratePoisonRecipes.contains(this)) 9307 VecOp->dropPoisonGeneratingFlags(); 9308 } 9309 9310 // Use this vector value for all users of the original instruction. 9311 State.set(this, V, Part); 9312 State.ILV->addMetadata(V, &I); 9313 } 9314 9315 break; 9316 } 9317 case Instruction::Freeze: { 9318 State.ILV->setDebugLocFromInst(&I); 9319 9320 for (unsigned Part = 0; Part < State.UF; ++Part) { 9321 Value *Op = State.get(getOperand(0), Part); 9322 9323 Value *Freeze = Builder.CreateFreeze(Op); 9324 State.set(this, Freeze, Part); 9325 } 9326 break; 9327 } 9328 case Instruction::ICmp: 9329 case Instruction::FCmp: { 9330 // Widen compares. Generate vector compares. 9331 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9332 auto *Cmp = cast<CmpInst>(&I); 9333 State.ILV->setDebugLocFromInst(Cmp); 9334 for (unsigned Part = 0; Part < State.UF; ++Part) { 9335 Value *A = State.get(getOperand(0), Part); 9336 Value *B = State.get(getOperand(1), Part); 9337 Value *C = nullptr; 9338 if (FCmp) { 9339 // Propagate fast math flags. 
9340 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9341 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9342 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9343 } else { 9344 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9345 } 9346 State.set(this, C, Part); 9347 State.ILV->addMetadata(C, &I); 9348 } 9349 9350 break; 9351 } 9352 9353 case Instruction::ZExt: 9354 case Instruction::SExt: 9355 case Instruction::FPToUI: 9356 case Instruction::FPToSI: 9357 case Instruction::FPExt: 9358 case Instruction::PtrToInt: 9359 case Instruction::IntToPtr: 9360 case Instruction::SIToFP: 9361 case Instruction::UIToFP: 9362 case Instruction::Trunc: 9363 case Instruction::FPTrunc: 9364 case Instruction::BitCast: { 9365 auto *CI = cast<CastInst>(&I); 9366 State.ILV->setDebugLocFromInst(CI); 9367 9368 /// Vectorize casts. 9369 Type *DestTy = (State.VF.isScalar()) 9370 ? CI->getType() 9371 : VectorType::get(CI->getType(), State.VF); 9372 9373 for (unsigned Part = 0; Part < State.UF; ++Part) { 9374 Value *A = State.get(getOperand(0), Part); 9375 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9376 State.set(this, Cast, Part); 9377 State.ILV->addMetadata(Cast, &I); 9378 } 9379 break; 9380 } 9381 default: 9382 // This instruction is not vectorized by simple widening. 9383 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9384 llvm_unreachable("Unhandled instruction!"); 9385 } // end of switch. 9386 } 9387 9388 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9389 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9390 // Construct a vector GEP by widening the operands of the scalar GEP as 9391 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9392 // results in a vector of pointers when at least one operand of the GEP 9393 // is vector-typed. Thus, to keep the representation compact, we only use 9394 // vector-typed operands for loop-varying values. 9395 9396 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9397 // If we are vectorizing, but the GEP has only loop-invariant operands, 9398 // the GEP we build (by only using vector-typed operands for 9399 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9400 // produce a vector of pointers, we need to either arbitrarily pick an 9401 // operand to broadcast, or broadcast a clone of the original GEP. 9402 // Here, we broadcast a clone of the original. 9403 // 9404 // TODO: If at some point we decide to scalarize instructions having 9405 // loop-invariant operands, this special case will no longer be 9406 // required. We would add the scalarization decision to 9407 // collectLoopScalars() and teach getVectorValue() to broadcast 9408 // the lane-zero scalar value. 9409 auto *Clone = State.Builder.Insert(GEP->clone()); 9410 for (unsigned Part = 0; Part < State.UF; ++Part) { 9411 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9412 State.set(this, EntryPart, Part); 9413 State.ILV->addMetadata(EntryPart, GEP); 9414 } 9415 } else { 9416 // If the GEP has at least one loop-varying operand, we are sure to 9417 // produce a vector of pointers. But if we are only unrolling, we want 9418 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9419 // produce with the code below will be scalar (if VF == 1) or vector 9420 // (otherwise). Note that for the unroll-only case, we still maintain 9421 // values in the vector mapping with initVector, as we do for other 9422 // instructions. 
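    // E.g. (illustrative) for getelementptr i32, ptr %base, i64 %iv with a
    // loop-invariant %base and a widened %iv, only the index operand is taken
    // as a vector, yielding one vector-of-pointers GEP per unroll part.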
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? State.get(getOperand(0), VPIteration(0, 0))
                      : State.get(getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
        VPValue *Operand = getOperand(I);
        if (IsIndexLoopInvariant[I - 1])
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // If the GEP instruction is vectorized and was in a basic block that
      // needed predication, we can't propagate the poison-generating
      // 'inbounds' flag. The control flow has been linearized and the GEP is
      // no longer guarded by the predicate, which could cause the 'inbounds'
      // property to no longer hold.
      bool IsInBounds =
          GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
      auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
                                             Indices, "", IsInBounds);
      assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(this, NewGEP, Part);
      State.ILV->addMetadata(NewGEP, GEP);
    }
  }
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");

  Value *Start = getStartValue()->getLiveInIRValue();
  const InductionDescriptor &ID = getInductionDescriptor();
  TruncInst *Trunc = getTruncInst();
  IRBuilderBase &Builder = State.Builder;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
  assert(State.VF.isVector() && "must have vector VF");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with fetching the step value.
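  // (Sketch: the vector IV starts in the preheader as
  //   <Start, Start+Step, ..., Start+(VF-1)*Step>
  // and each part is then advanced by a splat of VF*Step, as built below.)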
9481 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9482 9483 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9484 "Expected either an induction phi-node or a truncate of it!"); 9485 9486 // Construct the initial value of the vector IV in the vector loop preheader 9487 auto CurrIP = Builder.saveIP(); 9488 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9489 Builder.SetInsertPoint(VectorPH->getTerminator()); 9490 if (isa<TruncInst>(EntryVal)) { 9491 assert(Start->getType()->isIntegerTy() && 9492 "Truncation requires an integer type"); 9493 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9494 Step = Builder.CreateTrunc(Step, TruncType); 9495 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9496 } 9497 9498 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9499 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9500 Value *SteppedStart = getStepVector( 9501 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9502 9503 // We create vector phi nodes for both integer and floating-point induction 9504 // variables. Here, we determine the kind of arithmetic we will perform. 9505 Instruction::BinaryOps AddOp; 9506 Instruction::BinaryOps MulOp; 9507 if (Step->getType()->isIntegerTy()) { 9508 AddOp = Instruction::Add; 9509 MulOp = Instruction::Mul; 9510 } else { 9511 AddOp = ID.getInductionOpcode(); 9512 MulOp = Instruction::FMul; 9513 } 9514 9515 // Multiply the vectorization factor by the step using integer or 9516 // floating-point arithmetic as appropriate. 9517 Type *StepType = Step->getType(); 9518 Value *RuntimeVF; 9519 if (Step->getType()->isFloatingPointTy()) 9520 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9521 else 9522 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9523 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9524 9525 // Create a vector splat to use in the induction update. 9526 // 9527 // FIXME: If the step is non-constant, we create the vector splat with 9528 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9529 // handle a constant vector splat. 9530 Value *SplatVF = isa<Constant>(Mul) 9531 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9532 : Builder.CreateVectorSplat(State.VF, Mul); 9533 Builder.restoreIP(CurrIP); 9534 9535 // We may need to add the step a number of times, depending on the unroll 9536 // factor. The last of those goes into the PHI. 9537 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9538 &*State.CFG.PrevBB->getFirstInsertionPt()); 9539 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9540 Instruction *LastInduction = VecInd; 9541 for (unsigned Part = 0; Part < State.UF; ++Part) { 9542 State.set(this, LastInduction, Part); 9543 9544 if (isa<TruncInst>(EntryVal)) 9545 State.ILV->addMetadata(LastInduction, EntryVal); 9546 9547 LastInduction = cast<Instruction>( 9548 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9549 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9550 } 9551 9552 LastInduction->setName("vec.ind.next"); 9553 VecInd->addIncoming(SteppedStart, VectorPH); 9554 // Add induction update using an incorrect block temporarily. The phi node 9555 // will be fixed after VPlan execution. Note that at this point the latch 9556 // block cannot be used, as it does not exist yet. 9557 // TODO: Model increment value in VPlan, by turning the recipe into a 9558 // multi-def and a subclass of VPHeaderPHIRecipe. 
9559 VecInd->addIncoming(LastInduction, VectorPH); 9560 } 9561 9562 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9563 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9564 "Not a pointer induction according to InductionDescriptor!"); 9565 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9566 "Unexpected type."); 9567 9568 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9569 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9570 9571 if (onlyScalarsGenerated(State.VF)) { 9572 // This is the normalized GEP that starts counting at zero. 9573 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9574 CanonicalIV, IndDesc.getStep()->getType()); 9575 // Determine the number of scalars we need to generate for each unroll 9576 // iteration. If the instruction is uniform, we only need to generate the 9577 // first lane. Otherwise, we generate all VF values. 9578 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9579 assert((IsUniform || !State.VF.isScalable()) && 9580 "Cannot scalarize a scalable VF"); 9581 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9582 9583 for (unsigned Part = 0; Part < State.UF; ++Part) { 9584 Value *PartStart = 9585 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9586 9587 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9588 Value *Idx = State.Builder.CreateAdd( 9589 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9590 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9591 9592 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9593 State.CFG.PrevBB->getTerminator()); 9594 Value *SclrGep = emitTransformedIndex( 9595 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9596 SclrGep->setName("next.gep"); 9597 State.set(this, SclrGep, VPIteration(Part, Lane)); 9598 } 9599 } 9600 return; 9601 } 9602 9603 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9604 "Induction step not a SCEV constant!"); 9605 Type *PhiType = IndDesc.getStep()->getType(); 9606 9607 // Build a pointer phi 9608 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9609 Type *ScStValueType = ScalarStartValue->getType(); 9610 PHINode *NewPointerPhi = 9611 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9612 9613 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9614 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH); 9615 9616 // A pointer induction, performed by using a gep 9617 const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout(); 9618 Instruction *InductionLoc = &*State.Builder.GetInsertPoint(); 9619 9620 const SCEV *ScalarStep = IndDesc.getStep(); 9621 SCEVExpander Exp(SE, DL, "induction"); 9622 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9623 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9624 Value *NumUnrolledElems = 9625 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9626 Value *InductionGEP = GetElementPtrInst::Create( 9627 IndDesc.getElementType(), NewPointerPhi, 9628 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9629 InductionLoc); 9630 // Add induction update using an incorrect block temporarily. The phi node 9631 // will be fixed after VPlan execution. Note that at this point the latch 9632 // block cannot be used, as it does not exist yet. 9633 // TODO: Model increment value in VPlan, by turning the recipe into a 9634 // multi-def and a subclass of VPHeaderPHIRecipe. 
9635 NewPointerPhi->addIncoming(InductionGEP, VectorPH); 9636 9637 // Create UF many actual address geps that use the pointer 9638 // phi as base and a vectorized version of the step value 9639 // (<step*0, ..., step*N>) as offset. 9640 for (unsigned Part = 0; Part < State.UF; ++Part) { 9641 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9642 Value *StartOffsetScalar = 9643 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9644 Value *StartOffset = 9645 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9646 // Create a vector of consecutive numbers from zero to VF. 9647 StartOffset = State.Builder.CreateAdd( 9648 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9649 9650 Value *GEP = State.Builder.CreateGEP( 9651 IndDesc.getElementType(), NewPointerPhi, 9652 State.Builder.CreateMul( 9653 StartOffset, 9654 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9655 "vector.gep")); 9656 State.set(this, GEP, Part); 9657 } 9658 } 9659 9660 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9661 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9662 9663 // Fast-math-flags propagate from the original induction instruction. 9664 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9665 if (IndDesc.getInductionBinOp() && 9666 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9667 State.Builder.setFastMathFlags( 9668 IndDesc.getInductionBinOp()->getFastMathFlags()); 9669 9670 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9671 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9672 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9673 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9674 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9675 ScalarIV = 9676 Ty->isIntegerTy() 9677 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9678 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9679 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9680 getStartValue()->getLiveInIRValue(), Step, 9681 IndDesc); 9682 ScalarIV->setName("offset.idx"); 9683 } 9684 if (TruncToTy) { 9685 assert(Step->getType()->isIntegerTy() && 9686 "Truncation requires an integer step"); 9687 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9688 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9689 } 9690 return ScalarIV; 9691 }; 9692 9693 Value *ScalarIV = CreateScalarIV(Step); 9694 if (State.VF.isVector()) { 9695 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9696 return; 9697 } 9698 9699 for (unsigned Part = 0; Part < State.UF; ++Part) { 9700 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9701 Value *EntryPart; 9702 if (Step->getType()->isFloatingPointTy()) { 9703 Value *StartIdx = 9704 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9705 // Floating-point operations inherit FMF via the builder's flags. 
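// E.g. with a fixed VF=2 and Part=1, StartIdx above is 2.0, and the part's
// scalar IV computed below is ScalarIV <fadd/fsub> (2.0 fmul Step), per the
// induction opcode.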
9706 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9707 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9708 ScalarIV, MulOp);
9709 } else {
9710 Value *StartIdx =
9711 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9712 EntryPart = State.Builder.CreateAdd(
9713 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9714 }
9715 State.set(this, EntryPart, Part);
9716 }
9717 }
9718
9719 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9720 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9721 State);
9722 }
9723
9724 void VPBlendRecipe::execute(VPTransformState &State) {
9725 State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9726 // We know that all PHIs in non-header blocks are converted into
9727 // selects, so we don't have to worry about the insertion order and we
9728 // can just use the builder.
9729 // At this point we generate the predication tree. There may be
9730 // duplications since this is a simple recursive scan, but future
9731 // optimizations will clean it up.
9732
9733 unsigned NumIncoming = getNumIncomingValues();
9734
9735 // Generate a sequence of selects of the form:
9736 // SELECT(Mask3, In3,
9737 // SELECT(Mask2, In2,
9738 // SELECT(Mask1, In1,
9739 // In0)))
9740 // Note that Mask0 is never used: lanes for which no path reaches this phi,
9741 // and which are therefore essentially undef, are taken from In0.
9742 InnerLoopVectorizer::VectorParts Entry(State.UF);
9743 for (unsigned In = 0; In < NumIncoming; ++In) {
9744 for (unsigned Part = 0; Part < State.UF; ++Part) {
9745 // We might have single-edge PHIs (blocks) - use an identity
9746 // 'select' for the first PHI operand.
9747 Value *In0 = State.get(getIncomingValue(In), Part);
9748 if (In == 0)
9749 Entry[Part] = In0; // Initialize with the first incoming value.
9750 else {
9751 // Select between the current value and the previous incoming edge
9752 // based on the incoming mask.
9753 Value *Cond = State.get(getMask(In), Part);
9754 Entry[Part] =
9755 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9756 }
9757 }
9758 }
9759 for (unsigned Part = 0; Part < State.UF; ++Part)
9760 State.set(this, Entry[Part], Part);
9761 }
9762
9763 void VPInterleaveRecipe::execute(VPTransformState &State) {
9764 assert(!State.Instance && "Interleave group being replicated.");
9765 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9766 getStoredValues(), getMask());
9767 }
9768
9769 void VPReductionRecipe::execute(VPTransformState &State) {
9770 assert(!State.Instance && "Reduction being replicated.");
9771 Value *PrevInChain = State.get(getChainOp(), 0);
9772 RecurKind Kind = RdxDesc->getRecurrenceKind();
9773 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9774 // Propagate the fast-math flags carried by the underlying instruction.
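// To illustrate the conditional case handled below: for a source loop like
//   if (c[i]) sum += a[i];
// the vector operand is first blended with a splat of the reduction identity
// (0 for an integer add), so that masked-off lanes do not change the result:
//   %sel = select <4 x i1> %cond, <4 x i32> %a, <4 x i32> zeroinitializer
// (a sketch with VF=4; the identity value comes from getRecurrenceIdentity).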
9775 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9776 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9777 for (unsigned Part = 0; Part < State.UF; ++Part) {
9778 Value *NewVecOp = State.get(getVecOp(), Part);
9779 if (VPValue *Cond = getCondOp()) {
9780 Value *NewCond = State.get(Cond, Part);
9781 VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9782 Value *Iden = RdxDesc->getRecurrenceIdentity(
9783 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9784 Value *IdenVec =
9785 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9786 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9787 NewVecOp = Select;
9788 }
9789 Value *NewRed;
9790 Value *NextInChain;
9791 if (IsOrdered) {
9792 if (State.VF.isVector())
9793 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9794 PrevInChain);
9795 else
9796 NewRed = State.Builder.CreateBinOp(
9797 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9798 NewVecOp);
9799 PrevInChain = NewRed;
9800 } else {
9801 PrevInChain = State.get(getChainOp(), Part);
9802 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9803 }
9804 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9805 NextInChain =
9806 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9807 NewRed, PrevInChain);
9808 } else if (IsOrdered)
9809 NextInChain = NewRed;
9810 else
9811 NextInChain = State.Builder.CreateBinOp(
9812 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9813 PrevInChain);
9814 State.set(this, NextInChain, Part);
9815 }
9816 }
9817
9818 void VPReplicateRecipe::execute(VPTransformState &State) {
9819 if (State.Instance) { // Generate a single instance.
9820 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9821 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9822 IsPredicated, State);
9823 // Insert the scalar instance, packing it into a vector.
9824 if (AlsoPack && State.VF.isVector()) {
9825 // If we're constructing lane 0, initialize to start from poison.
9826 if (State.Instance->Lane.isFirstLane()) {
9827 assert(!State.VF.isScalable() && "VF is assumed to be non-scalable.");
9828 Value *Poison = PoisonValue::get(
9829 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9830 State.set(this, Poison, State.Instance->Part);
9831 }
9832 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9833 }
9834 return;
9835 }
9836
9837 // Generate scalar instances for all VF lanes of all UF parts, unless the
9838 // instruction is uniform, in which case we generate only the first lane for
9839 // each of the UF parts.
9840 unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 9841 assert((!State.VF.isScalable() || IsUniform) && 9842 "Can't scalarize a scalable vector"); 9843 for (unsigned Part = 0; Part < State.UF; ++Part) 9844 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9845 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9846 VPIteration(Part, Lane), IsPredicated, 9847 State); 9848 } 9849 9850 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9851 assert(State.Instance && "Branch on Mask works only on single instance."); 9852 9853 unsigned Part = State.Instance->Part; 9854 unsigned Lane = State.Instance->Lane.getKnownLane(); 9855 9856 Value *ConditionBit = nullptr; 9857 VPValue *BlockInMask = getMask(); 9858 if (BlockInMask) { 9859 ConditionBit = State.get(BlockInMask, Part); 9860 if (ConditionBit->getType()->isVectorTy()) 9861 ConditionBit = State.Builder.CreateExtractElement( 9862 ConditionBit, State.Builder.getInt32(Lane)); 9863 } else // Block in mask is all-one. 9864 ConditionBit = State.Builder.getTrue(); 9865 9866 // Replace the temporary unreachable terminator with a new conditional branch, 9867 // whose two destinations will be set later when they are created. 9868 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9869 assert(isa<UnreachableInst>(CurrentTerminator) && 9870 "Expected to replace unreachable terminator with conditional branch."); 9871 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9872 CondBr->setSuccessor(0, nullptr); 9873 ReplaceInstWithInst(CurrentTerminator, CondBr); 9874 } 9875 9876 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9877 assert(State.Instance && "Predicated instruction PHI works per instance."); 9878 Instruction *ScalarPredInst = 9879 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9880 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9881 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9882 assert(PredicatingBB && "Predicated block has no single predecessor."); 9883 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9884 "operand must be VPReplicateRecipe"); 9885 9886 // By current pack/unpack logic we need to generate only a single phi node: if 9887 // a vector value for the predicated instruction exists at this point it means 9888 // the instruction has vector users only, and a phi for the vector value is 9889 // needed. In this case the recipe of the predicated instruction is marked to 9890 // also do that packing, thereby "hoisting" the insert-element sequence. 9891 // Otherwise, a phi node for the scalar value is needed. 9892 unsigned Part = State.Instance->Part; 9893 if (State.hasVectorValue(getOperand(0), Part)) { 9894 Value *VectorValue = State.get(getOperand(0), Part); 9895 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9896 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9897 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9898 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9899 if (State.hasVectorValue(this, Part)) 9900 State.reset(this, VPhi, Part); 9901 else 9902 State.set(this, VPhi, Part); 9903 // NOTE: Currently we need to update the value of the operand, so the next 9904 // predicated iteration inserts its generated value in the correct vector. 
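// Sketch of the merge emitted above for the vector case (names illustrative):
//   continue.block:
//     %vphi = phi <4 x i32> [ %vec.unchanged, %predicating.bb ],
//                           [ %vec.with.lane, %predicated.bb ]
// where %vec.with.lane is the insertelement produced by the predicated
// instruction's packing.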
9905 State.reset(getOperand(0), VPhi, Part); 9906 } else { 9907 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9908 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9909 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9910 PredicatingBB); 9911 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9912 if (State.hasScalarValue(this, *State.Instance)) 9913 State.reset(this, Phi, *State.Instance); 9914 else 9915 State.set(this, Phi, *State.Instance); 9916 // NOTE: Currently we need to update the value of the operand, so the next 9917 // predicated iteration inserts its generated value in the correct vector. 9918 State.reset(getOperand(0), Phi, *State.Instance); 9919 } 9920 } 9921 9922 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9923 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9924 9925 // Attempt to issue a wide load. 9926 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9927 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9928 9929 assert((LI || SI) && "Invalid Load/Store instruction"); 9930 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9931 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9932 9933 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9934 9935 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9936 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9937 bool CreateGatherScatter = !Consecutive; 9938 9939 auto &Builder = State.Builder; 9940 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9941 bool isMaskRequired = getMask(); 9942 if (isMaskRequired) 9943 for (unsigned Part = 0; Part < State.UF; ++Part) 9944 BlockInMaskParts[Part] = State.get(getMask(), Part); 9945 9946 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9947 // Calculate the pointer for the specific unroll-part. 9948 GetElementPtrInst *PartPtr = nullptr; 9949 9950 bool InBounds = false; 9951 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9952 InBounds = gep->isInBounds(); 9953 if (Reverse) { 9954 // If the address is consecutive but reversed, then the 9955 // wide store needs to start at the last vector element. 9956 // RunTimeVF = VScale * VF.getKnownMinValue() 9957 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9958 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9959 // NumElt = -Part * RunTimeVF 9960 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9961 // LastLane = 1 - RunTimeVF 9962 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9963 PartPtr = 9964 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9965 PartPtr->setIsInBounds(InBounds); 9966 PartPtr = cast<GetElementPtrInst>( 9967 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9968 PartPtr->setIsInBounds(InBounds); 9969 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
9970 BlockInMaskParts[Part] = 9971 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9972 } else { 9973 Value *Increment = 9974 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9975 PartPtr = cast<GetElementPtrInst>( 9976 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9977 PartPtr->setIsInBounds(InBounds); 9978 } 9979 9980 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9981 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9982 }; 9983 9984 // Handle Stores: 9985 if (SI) { 9986 State.ILV->setDebugLocFromInst(SI); 9987 9988 for (unsigned Part = 0; Part < State.UF; ++Part) { 9989 Instruction *NewSI = nullptr; 9990 Value *StoredVal = State.get(StoredValue, Part); 9991 if (CreateGatherScatter) { 9992 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9993 Value *VectorGep = State.get(getAddr(), Part); 9994 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9995 MaskPart); 9996 } else { 9997 if (Reverse) { 9998 // If we store to reverse consecutive memory locations, then we need 9999 // to reverse the order of elements in the stored value. 10000 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 10001 // We don't want to update the value in the map as it might be used in 10002 // another expression. So don't call resetVectorValue(StoredVal). 10003 } 10004 auto *VecPtr = 10005 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10006 if (isMaskRequired) 10007 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10008 BlockInMaskParts[Part]); 10009 else 10010 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10011 } 10012 State.ILV->addMetadata(NewSI, SI); 10013 } 10014 return; 10015 } 10016 10017 // Handle loads. 10018 assert(LI && "Must have a load instruction"); 10019 State.ILV->setDebugLocFromInst(LI); 10020 for (unsigned Part = 0; Part < State.UF; ++Part) { 10021 Value *NewLI; 10022 if (CreateGatherScatter) { 10023 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10024 Value *VectorGep = State.get(getAddr(), Part); 10025 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10026 nullptr, "wide.masked.gather"); 10027 State.ILV->addMetadata(NewLI, LI); 10028 } else { 10029 auto *VecPtr = 10030 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10031 if (isMaskRequired) 10032 NewLI = Builder.CreateMaskedLoad( 10033 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10034 PoisonValue::get(DataTy), "wide.masked.load"); 10035 else 10036 NewLI = 10037 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10038 10039 // Add metadata to the load, but setVectorValue to the reverse shuffle. 10040 State.ILV->addMetadata(NewLI, LI); 10041 if (Reverse) 10042 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10043 } 10044 10045 State.set(getVPSingleValue(), NewLI, Part); 10046 } 10047 } 10048 10049 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10050 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10051 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10052 // for predication. 
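// For example, if the function carries the optsize attribute (-Os), case 1)
// fires and CM_ScalarEpilogueNotAllowedOptSize is returned before any of the
// options, hints or TTI preferences below are consulted.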
10053 static ScalarEpilogueLowering getScalarEpilogueLowering( 10054 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10055 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10056 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10057 LoopVectorizationLegality &LVL) { 10058 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10059 // don't look at hints or options, and don't request a scalar epilogue. 10060 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10061 // LoopAccessInfo (due to code dependency and not being able to reliably get 10062 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10063 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10064 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10065 // back to the old way and vectorize with versioning when forced. See D81345.) 10066 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10067 PGSOQueryType::IRPass) && 10068 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10069 return CM_ScalarEpilogueNotAllowedOptSize; 10070 10071 // 2) If set, obey the directives 10072 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10073 switch (PreferPredicateOverEpilogue) { 10074 case PreferPredicateTy::ScalarEpilogue: 10075 return CM_ScalarEpilogueAllowed; 10076 case PreferPredicateTy::PredicateElseScalarEpilogue: 10077 return CM_ScalarEpilogueNotNeededUsePredicate; 10078 case PreferPredicateTy::PredicateOrDontVectorize: 10079 return CM_ScalarEpilogueNotAllowedUsePredicate; 10080 }; 10081 } 10082 10083 // 3) If set, obey the hints 10084 switch (Hints.getPredicate()) { 10085 case LoopVectorizeHints::FK_Enabled: 10086 return CM_ScalarEpilogueNotNeededUsePredicate; 10087 case LoopVectorizeHints::FK_Disabled: 10088 return CM_ScalarEpilogueAllowed; 10089 }; 10090 10091 // 4) if the TTI hook indicates this is profitable, request predication. 10092 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10093 LVL.getLAI())) 10094 return CM_ScalarEpilogueNotNeededUsePredicate; 10095 10096 return CM_ScalarEpilogueAllowed; 10097 } 10098 10099 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10100 // If Values have been set for this Def return the one relevant for \p Part. 10101 if (hasVectorValue(Def, Part)) 10102 return Data.PerPartOutput[Def][Part]; 10103 10104 if (!hasScalarValue(Def, {Part, 0})) { 10105 Value *IRV = Def->getLiveInIRValue(); 10106 Value *B = ILV->getBroadcastInstrs(IRV); 10107 set(Def, B, Part); 10108 return B; 10109 } 10110 10111 Value *ScalarValue = get(Def, {Part, 0}); 10112 // If we aren't vectorizing, we can just copy the scalar map values over 10113 // to the vector map. 10114 if (VF.isScalar()) { 10115 set(Def, ScalarValue, Part); 10116 return ScalarValue; 10117 } 10118 10119 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10120 bool IsUniform = RepR && RepR->isUniform(); 10121 10122 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10123 // Check if there is a scalar value for the selected lane. 10124 if (!hasScalarValue(Def, {Part, LastLane})) { 10125 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
10126 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10127 isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10128 "unexpected recipe found to be invariant");
10129 IsUniform = true;
10130 LastLane = 0;
10131 }
10132
10133 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10134 // Set the insert point after the last scalarized instruction or after the
10135 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10136 // will directly follow the scalar definitions.
10137 auto OldIP = Builder.saveIP();
10138 auto NewIP =
10139 isa<PHINode>(LastInst)
10140 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10141 : std::next(BasicBlock::iterator(LastInst));
10142 Builder.SetInsertPoint(&*NewIP);
10143
10144 // However, if we are vectorizing, we need to construct the vector values.
10145 // If the value is known to be uniform after vectorization, we can just
10146 // broadcast the scalar value corresponding to lane zero for each unroll
10147 // iteration. Otherwise, we construct the vector values using
10148 // insertelement instructions. Since the resulting vectors are stored in
10149 // State, we will only generate the insertelements once.
10150 Value *VectorValue = nullptr;
10151 if (IsUniform) {
10152 VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10153 set(Def, VectorValue, Part);
10154 } else {
10155 // Initialize packing with insertelements to start from poison.
10156 assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
10157 Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10158 set(Def, Poison, Part);
10159 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10160 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10161 VectorValue = get(Def, Part);
10162 }
10163 Builder.restoreIP(OldIP);
10164 return VectorValue;
10165 }
10166
10167 // Process the loop in the VPlan-native vectorization path. This path builds
10168 // VPlan upfront in the vectorization pipeline, which allows applying
10169 // VPlan-to-VPlan transformations from the very beginning without modifying the
10170 // input LLVM IR.
10171 static bool processLoopInVPlanNativePath(
10172 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10173 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10174 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10175 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10176 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10177 LoopVectorizationRequirements &Requirements) {
10178
10179 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10180 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10181 return false;
10182 }
10183 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10184 Function *F = L->getHeader()->getParent();
10185 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10186
10187 ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10188 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10189
10190 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10191 &Hints, IAI);
10192 // Use the planner for outer loop vectorization.
10193 // TODO: CM is not used at this point inside the planner. Turn CM into an
10194 // optional argument if we don't need it in the future.
10195 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10196 Requirements, ORE);
10197
10198 // Get user vectorization factor.
10199 ElementCount UserVF = Hints.getWidth();
10200
10201 CM.collectElementTypesForWidening();
10202
10203 // Plan how to best vectorize, return the best VF and its cost.
10204 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10205
10206 // If we are stress testing VPlan builds, do not attempt to generate vector
10207 // code. Masked vector code generation support will follow soon.
10208 // Also, do not attempt to vectorize if no vector code will be produced.
10209 if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
10210 return false;
10211
10212 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10213
10214 {
10215 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10216 F->getParent()->getDataLayout());
10217 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10218 &CM, BFI, PSI, Checks);
10219 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10220 << L->getHeader()->getParent()->getName() << "\"\n");
10221 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10222 }
10223
10224 // Mark the loop as already vectorized to avoid vectorizing again.
10225 Hints.setAlreadyVectorized();
10226 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10227 return true;
10228 }
10229
10230 // Emit a remark if there are stores to floats that required a floating point
10231 // extension: if the vectorized loop was computed in a wider floating-point
10232 // type, there will be a performance penalty from the conversion overhead and
10233 // the change in the vector width.
10234 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10235 SmallVector<Instruction *, 4> Worklist;
10236 for (BasicBlock *BB : L->getBlocks()) {
10237 for (Instruction &Inst : *BB) {
10238 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10239 if (S->getValueOperand()->getType()->isFloatTy())
10240 Worklist.push_back(S);
10241 }
10242 }
10243 }
10244
10245 // Traverse the floating-point stores upwards, searching for floating-point
10246 // conversions.
10247 SmallPtrSet<const Instruction *, 4> Visited;
10248 SmallPtrSet<const Instruction *, 4> EmittedRemark;
10249 while (!Worklist.empty()) {
10250 auto *I = Worklist.pop_back_val();
10251 if (!L->contains(I))
10252 continue;
10253 if (!Visited.insert(I).second)
10254 continue;
10255
10256 // Emit a remark if the floating point store required a floating
10257 // point conversion.
10258 // TODO: More work could be done to identify the root cause, such as a
10259 // constant or a function return type, and point the user to it.
10260 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10261 ORE->emit([&]() {
10262 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10263 I->getDebugLoc(), L->getHeader())
10264 << "floating point conversion changes vector width. "
10265 << "Mixed floating point precision requires an up/down "
10266 << "cast that will negatively impact performance.";
10267 });
10268
10269 for (Use &Op : I->operands())
10270 if (auto *OpI = dyn_cast<Instruction>(Op))
10271 Worklist.push_back(OpI);
10272 }
10273 }
10274
10275 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10276 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10277 !EnableLoopInterleaving),
10278 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10279 !EnableLoopVectorization) {}
10280
10281 bool LoopVectorizePass::processLoop(Loop *L) {
10282 assert((EnableVPlanNativePath || L->isInnermost()) &&
10283 "VPlan-native path is not enabled. 
Only process inner loops."); 10284 10285 #ifndef NDEBUG 10286 const std::string DebugLocStr = getDebugLocString(L); 10287 #endif /* NDEBUG */ 10288 10289 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '" 10290 << L->getHeader()->getParent()->getName() << "' from " 10291 << DebugLocStr << "\n"); 10292 10293 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10294 10295 LLVM_DEBUG( 10296 dbgs() << "LV: Loop hints:" 10297 << " force=" 10298 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10299 ? "disabled" 10300 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10301 ? "enabled" 10302 : "?")) 10303 << " width=" << Hints.getWidth() 10304 << " interleave=" << Hints.getInterleave() << "\n"); 10305 10306 // Function containing loop 10307 Function *F = L->getHeader()->getParent(); 10308 10309 // Looking at the diagnostic output is the only way to determine if a loop 10310 // was vectorized (other than looking at the IR or machine code), so it 10311 // is important to generate an optimization remark for each loop. Most of 10312 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10313 // generated as OptimizationRemark and OptimizationRemarkMissed are 10314 // less verbose reporting vectorized loops and unvectorized loops that may 10315 // benefit from vectorization, respectively. 10316 10317 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10318 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10319 return false; 10320 } 10321 10322 PredicatedScalarEvolution PSE(*SE, *L); 10323 10324 // Check if it is legal to vectorize the loop. 10325 LoopVectorizationRequirements Requirements; 10326 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10327 &Requirements, &Hints, DB, AC, BFI, PSI); 10328 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10329 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10330 Hints.emitRemarkWithHints(); 10331 return false; 10332 } 10333 10334 // Check the function attributes and profiles to find out if this function 10335 // should be optimized for size. 10336 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10337 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10338 10339 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10340 // here. They may require CFG and instruction level transformations before 10341 // even evaluating whether vectorization is profitable. Since we cannot modify 10342 // the incoming IR, we need to build VPlan upfront in the vectorization 10343 // pipeline. 10344 if (!L->isInnermost()) 10345 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10346 ORE, BFI, PSI, Hints, Requirements); 10347 10348 assert(L->isInnermost() && "Inner loop expected."); 10349 10350 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10351 // count by optimizing for size, to minimize overheads. 10352 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10353 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10354 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10355 << "This loop is worth vectorizing only if no scalar " 10356 << "iteration overheads are incurred."); 10357 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10358 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10359 else { 10360 LLVM_DEBUG(dbgs() << "\n"); 10361 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10362 } 10363 } 10364 10365 // Check the function attributes to see if implicit floats are allowed. 10366 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10367 // an integer loop and the vector instructions selected are purely integer 10368 // vector instructions? 10369 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10370 reportVectorizationFailure( 10371 "Can't vectorize when the NoImplicitFloat attribute is used", 10372 "loop not vectorized due to NoImplicitFloat attribute", 10373 "NoImplicitFloat", ORE, L); 10374 Hints.emitRemarkWithHints(); 10375 return false; 10376 } 10377 10378 // Check if the target supports potentially unsafe FP vectorization. 10379 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10380 // for the target we're vectorizing for, to make sure none of the 10381 // additional fp-math flags can help. 10382 if (Hints.isPotentiallyUnsafe() && 10383 TTI->isFPVectorizationPotentiallyUnsafe()) { 10384 reportVectorizationFailure( 10385 "Potentially unsafe FP op prevents vectorization", 10386 "loop not vectorized due to unsafe FP support.", 10387 "UnsafeFP", ORE, L); 10388 Hints.emitRemarkWithHints(); 10389 return false; 10390 } 10391 10392 bool AllowOrderedReductions; 10393 // If the flag is set, use that instead and override the TTI behaviour. 10394 if (ForceOrderedReductions.getNumOccurrences() > 0) 10395 AllowOrderedReductions = ForceOrderedReductions; 10396 else 10397 AllowOrderedReductions = TTI->enableOrderedReductions(); 10398 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10399 ORE->emit([&]() { 10400 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10401 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10402 ExactFPMathInst->getDebugLoc(), 10403 ExactFPMathInst->getParent()) 10404 << "loop not vectorized: cannot prove it is safe to reorder " 10405 "floating-point operations"; 10406 }); 10407 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10408 "reorder floating-point operations\n"); 10409 Hints.emitRemarkWithHints(); 10410 return false; 10411 } 10412 10413 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10414 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10415 10416 // If an override option has been passed in for interleaved accesses, use it. 10417 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10418 UseInterleaved = EnableInterleavedMemAccesses; 10419 10420 // Analyze interleaved memory accesses. 10421 if (UseInterleaved) { 10422 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10423 } 10424 10425 // Use the cost model. 10426 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10427 F, &Hints, IAI); 10428 CM.collectValuesToIgnore(); 10429 CM.collectElementTypesForWidening(); 10430 10431 // Use the planner for vectorization. 10432 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10433 Requirements, ORE); 10434 10435 // Get user vectorization factor and interleave count. 
10436 ElementCount UserVF = Hints.getWidth();
10437 unsigned UserIC = Hints.getInterleave();
10438
10439 // Plan how to best vectorize, return the best VF and its cost.
10440 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10441
10442 VectorizationFactor VF = VectorizationFactor::Disabled();
10443 unsigned IC = 1;
10444
10445 if (MaybeVF) {
10446 if (LVP.requiresTooManyRuntimeChecks()) {
10447 ORE->emit([&]() {
10448 return OptimizationRemarkAnalysisAliasing(
10449 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
10450 L->getHeader())
10451 << "loop not vectorized: cannot prove it is safe to reorder "
10452 "memory operations";
10453 });
10454 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
10455 Hints.emitRemarkWithHints();
10456 return false;
10457 }
10458 VF = *MaybeVF;
10459 // Select the interleave count.
10460 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10461 }
10462
10463 // Identify the diagnostic messages that should be produced.
10464 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10465 bool VectorizeLoop = true, InterleaveLoop = true;
10466 if (VF.Width.isScalar()) {
10467 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10468 VecDiagMsg = std::make_pair(
10469 "VectorizationNotBeneficial",
10470 "the cost-model indicates that vectorization is not beneficial");
10471 VectorizeLoop = false;
10472 }
10473
10474 if (!MaybeVF && UserIC > 1) {
10475 // Tell the user interleaving was avoided up-front, despite being explicitly
10476 // requested.
10477 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10478 "interleaving should be avoided up front\n");
10479 IntDiagMsg = std::make_pair(
10480 "InterleavingAvoided",
10481 "Ignoring UserIC, because interleaving was avoided up front");
10482 InterleaveLoop = false;
10483 } else if (IC == 1 && UserIC <= 1) {
10484 // Tell the user interleaving is not beneficial.
10485 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10486 IntDiagMsg = std::make_pair(
10487 "InterleavingNotBeneficial",
10488 "the cost-model indicates that interleaving is not beneficial");
10489 InterleaveLoop = false;
10490 if (UserIC == 1) {
10491 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10492 IntDiagMsg.second +=
10493 " and is explicitly disabled or interleave count is set to 1";
10494 }
10495 } else if (IC > 1 && UserIC == 1) {
10496 // Tell the user interleaving is beneficial, but it is explicitly disabled.
10497 LLVM_DEBUG(
10498 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10499 IntDiagMsg = std::make_pair(
10500 "InterleavingBeneficialButDisabled",
10501 "the cost-model indicates that interleaving is beneficial "
10502 "but is explicitly disabled or interleave count is set to 1");
10503 InterleaveLoop = false;
10504 }
10505
10506 // Override IC if user provided an interleave count.
10507 IC = UserIC > 0 ? UserIC : IC;
10508
10509 // Emit diagnostic messages, if any.
10510 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10511 if (!VectorizeLoop && !InterleaveLoop) {
10512 // Do not vectorize or interleave the loop.
10513 ORE->emit([&]() { 10514 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10515 L->getStartLoc(), L->getHeader()) 10516 << VecDiagMsg.second; 10517 }); 10518 ORE->emit([&]() { 10519 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10520 L->getStartLoc(), L->getHeader()) 10521 << IntDiagMsg.second; 10522 }); 10523 return false; 10524 } else if (!VectorizeLoop && InterleaveLoop) { 10525 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10526 ORE->emit([&]() { 10527 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10528 L->getStartLoc(), L->getHeader()) 10529 << VecDiagMsg.second; 10530 }); 10531 } else if (VectorizeLoop && !InterleaveLoop) { 10532 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10533 << ") in " << DebugLocStr << '\n'); 10534 ORE->emit([&]() { 10535 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10536 L->getStartLoc(), L->getHeader()) 10537 << IntDiagMsg.second; 10538 }); 10539 } else if (VectorizeLoop && InterleaveLoop) { 10540 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10541 << ") in " << DebugLocStr << '\n'); 10542 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10543 } 10544 10545 bool DisableRuntimeUnroll = false; 10546 MDNode *OrigLoopID = L->getLoopID(); 10547 { 10548 // Optimistically generate runtime checks. Drop them if they turn out to not 10549 // be profitable. Limit the scope of Checks, so the cleanup happens 10550 // immediately after vector codegeneration is done. 10551 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10552 F->getParent()->getDataLayout()); 10553 if (!VF.Width.isScalar() || IC > 1) 10554 Checks.Create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, IC); 10555 10556 using namespace ore; 10557 if (!VectorizeLoop) { 10558 assert(IC > 1 && "interleave count should not be 1 or 0"); 10559 // If we decided that it is not legal to vectorize the loop, then 10560 // interleave it. 10561 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10562 &CM, BFI, PSI, Checks); 10563 10564 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10565 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10566 10567 ORE->emit([&]() { 10568 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10569 L->getHeader()) 10570 << "interleaved loop (interleaved count: " 10571 << NV("InterleaveCount", IC) << ")"; 10572 }); 10573 } else { 10574 // If we decided that it is *legal* to vectorize the loop, then do it. 10575 10576 // Consider vectorizing the epilogue too if it's profitable. 10577 VectorizationFactor EpilogueVF = 10578 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10579 if (EpilogueVF.Width.isVector()) { 10580 10581 // The first pass vectorizes the main loop and creates a scalar epilogue 10582 // to be vectorized by executing the plan (potentially with a different 10583 // factor) again shortly afterwards. 10584 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10585 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10586 EPI, &LVL, &CM, BFI, PSI, Checks); 10587 10588 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10589 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10590 DT); 10591 ++LoopsVectorized; 10592 10593 // Second pass vectorizes the epilogue and adjusts the control flow 10594 // edges from the first pass. 
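// After both passes the generated structure is, roughly:
//   main vector loop (original MainLoopVF/MainLoopUF)
//     -> epilogue vector loop (EpilogueVF/EpilogueUF)
//       -> scalar remainder loop
// with trip-count checks deciding how many iterations each stage handles.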
10595 EPI.MainLoopVF = EPI.EpilogueVF; 10596 EPI.MainLoopUF = EPI.EpilogueUF; 10597 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10598 ORE, EPI, &LVL, &CM, BFI, PSI, 10599 Checks); 10600 10601 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10602 VPRegionBlock *VectorLoop = BestEpiPlan.getVectorLoopRegion(); 10603 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock(); 10604 Header->setName("vec.epilog.vector.body"); 10605 10606 // Ensure that the start values for any VPReductionPHIRecipes are 10607 // updated before vectorising the epilogue loop. 10608 for (VPRecipeBase &R : Header->phis()) { 10609 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10610 if (auto *Resume = MainILV.getReductionResumeValue( 10611 ReductionPhi->getRecurrenceDescriptor())) { 10612 VPValue *StartVal = BestEpiPlan.getOrAddExternalDef(Resume); 10613 ReductionPhi->setOperand(0, StartVal); 10614 } 10615 } 10616 } 10617 10618 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10619 DT); 10620 ++LoopsEpilogueVectorized; 10621 10622 if (!MainILV.areSafetyChecksAdded()) 10623 DisableRuntimeUnroll = true; 10624 } else { 10625 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10626 &LVL, &CM, BFI, PSI, Checks); 10627 10628 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10629 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10630 ++LoopsVectorized; 10631 10632 // Add metadata to disable runtime unrolling a scalar loop when there 10633 // are no runtime checks about strides and memory. A scalar loop that is 10634 // rarely used is not worth unrolling. 10635 if (!LB.areSafetyChecksAdded()) 10636 DisableRuntimeUnroll = true; 10637 } 10638 // Report the vectorization decision. 10639 ORE->emit([&]() { 10640 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10641 L->getHeader()) 10642 << "vectorized loop (vectorization width: " 10643 << NV("VectorizationFactor", VF.Width) 10644 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10645 }); 10646 } 10647 10648 if (ORE->allowExtraAnalysis(LV_NAME)) 10649 checkMixedPrecision(L, ORE); 10650 } 10651 10652 Optional<MDNode *> RemainderLoopID = 10653 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10654 LLVMLoopVectorizeFollowupEpilogue}); 10655 if (RemainderLoopID.hasValue()) { 10656 L->setLoopID(RemainderLoopID.getValue()); 10657 } else { 10658 if (DisableRuntimeUnroll) 10659 AddRuntimeUnrollDisableMetaData(L); 10660 10661 // Mark the loop as already vectorized to avoid vectorizing again. 10662 Hints.setAlreadyVectorized(); 10663 } 10664 10665 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10666 return true; 10667 } 10668 10669 LoopVectorizeResult LoopVectorizePass::runImpl( 10670 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10671 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10672 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10673 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10674 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10675 SE = &SE_; 10676 LI = &LI_; 10677 TTI = &TTI_; 10678 DT = &DT_; 10679 BFI = &BFI_; 10680 TLI = TLI_; 10681 AA = &AA_; 10682 AC = &AC_; 10683 GetLAA = &GetLAA_; 10684 DB = &DB_; 10685 ORE = &ORE_; 10686 PSI = PSI_; 10687 10688 // Don't attempt if 10689 // 1. the target claims to have no vector registers, and 10690 // 2. interleaving won't help ILP. 
10691 //
10692 // The second condition is necessary because, even if the target has no
10693 // vector registers, loop vectorization may still enable scalar
10694 // interleaving.
10695 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10696 TTI->getMaxInterleaveFactor(1) < 2)
10697 return LoopVectorizeResult(false, false);
10698
10699 bool Changed = false, CFGChanged = false;
10700
10701 // The vectorizer requires loops to be in simplified form.
10702 // Since simplification may add new inner loops, it has to run before the
10703 // legality and profitability checks. This means running the loop vectorizer
10704 // will simplify all loops, regardless of whether anything ends up being
10705 // vectorized.
10706 for (auto &L : *LI)
10707 Changed |= CFGChanged |=
10708 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10709
10710 // Build up a worklist of inner-loops to vectorize. This is necessary as
10711 // the act of vectorizing or partially unrolling a loop creates new loops
10712 // and can invalidate iterators across the loops.
10713 SmallVector<Loop *, 8> Worklist;
10714
10715 for (Loop *L : *LI)
10716 collectSupportedLoops(*L, LI, ORE, Worklist);
10717
10718 LoopsAnalyzed += Worklist.size();
10719
10720 // Now walk the identified inner loops.
10721 while (!Worklist.empty()) {
10722 Loop *L = Worklist.pop_back_val();
10723
10724 // For the inner loops we actually process, form LCSSA to simplify the
10725 // transform.
10726 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10727
10728 Changed |= CFGChanged |= processLoop(L);
10729 }
10730
10731 // Report the aggregate result of processing each loop nest in the function.
10732 return LoopVectorizeResult(Changed, CFGChanged);
10733 }
10734
10735 PreservedAnalyses LoopVectorizePass::run(Function &F,
10736 FunctionAnalysisManager &AM) {
10737 auto &LI = AM.getResult<LoopAnalysis>(F);
10738 // There are no loops in the function. Return before computing other expensive
10739 // analyses.
10740 if (LI.empty())
10741 return PreservedAnalyses::all();
10742 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10743 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10744 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10745 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10746 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10747 auto &AA = AM.getResult<AAManager>(F);
10748 auto &AC = AM.getResult<AssumptionAnalysis>(F);
10749 auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10750 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10751
10752 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10753 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10754 [&](Loop &L) -> const LoopAccessInfo & {
10755 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
10756 TLI, TTI, nullptr, nullptr, nullptr};
10757 return LAM.getResult<LoopAccessAnalysis>(L, AR);
10758 };
10759 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10760 ProfileSummaryInfo *PSI =
10761 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10762 LoopVectorizeResult Result =
10763 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10764 if (!Result.MadeAnyChange)
10765 return PreservedAnalyses::all();
10766 PreservedAnalyses PA;
10767
10768 // We currently do not preserve loopinfo/dominator analyses with outer loop
10769 // vectorization. Until this is addressed, mark these analyses as preserved
10770 // only for the non-VPlan-native path.
10771 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10772 if (!EnableVPlanNativePath) {
10773 PA.preserve<LoopAnalysis>();
10774 PA.preserve<DominatorTreeAnalysis>();
10775 }
10776
10777 if (Result.MadeCFGChange) {
10778 // Making CFG changes likely means a loop got vectorized. Indicate that
10779 // extra simplification passes should be run.
10780 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10781 // be run if runtime checks have been added.
10782 AM.getResult<ShouldRunExtraVectorPasses>(F);
10783 PA.preserve<ShouldRunExtraVectorPasses>();
10784 } else {
10785 PA.preserveSet<CFGAnalyses>();
10786 }
10787 return PA;
10788 }
10789
10790 void LoopVectorizePass::printPipeline(
10791 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10792 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10793 OS, MapClassName2PassName);
10794
10795 OS << "<";
10796 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10797 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10798 OS << ">";
10799 }
10800
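// For reference, a default-constructed LoopVectorizePass prints its pipeline
// parameters as something like
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>
// where the pass name itself comes from the MapClassName2PassName mapping.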