//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
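
// As an illustrative example (not part of the algorithm description above):
// with a vectorization factor of 4, a scalar loop such as
//   for (i = 0; i < n; i++) a[i] = b[i] + 1;
// is conceptually rewritten so that each vector iteration computes
// a[i..i+3] = b[i..i+3] + 1 and the induction variable is advanced by 4; the
// remaining iterations run in a scalar epilogue or in a predicated
// (tail-folded) vector body, as controlled by the options declared below.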

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
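
// Illustrative example (target-dependent, not exhaustive): on typical x86
// targets, x86_fp80 stores 80 bits of data but has an allocation size of 96
// or 128 bits, so an array of x86_fp80 is not bitcast-compatible with a
// vector of x86_fp80 and hasIrregularType() returns true for it, whereas
// tightly packed types such as i32 or float are regular.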

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPWidenRecipe *WidenRec,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPWidenGEPRecipe *WidenGEPRec,
                VPUser &Indices, unsigned UF, ElementCount VF,
                bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant,
                VPTransformState &State);

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask,
                                  bool ConsecutiveStride, bool Reverse);

  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *
  getStepVector(Value *Val, Value *StartIdx, Value *Step,
                Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(
      Value *Val, Value *StartIdx, Value *Step,
      Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to set up the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
                              int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}
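
// Illustrative note on the helpers above: for a fixed VF of 4 and Step of 2,
// createStepForVF() simply returns the constant 8, while for a scalable
// VF of "vscale x 4" it emits code computing 8 * vscale at runtime via
// IRBuilder::CreateVScale(). getRuntimeVF() corresponds to the Step == 1 case.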

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && cast<Operator>(Instr)->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
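
// Illustrative note: this comparator orders all fixed VFs before all scalable
// ones and then compares the known minimum lane count, e.g.
//   4 < 8 < vscale x 2 < vscale x 4
// which keeps candidate VFs in a deterministic set order; it is not a
// statement about which factor is larger at runtime.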
If the factors are 0, vectorization and interleaving should be 1339 /// avoided up front. 1340 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC); 1341 1342 /// \return True if runtime checks are required for vectorization, and false 1343 /// otherwise. 1344 bool runtimeChecksRequired(); 1345 1346 /// \return The most profitable vectorization factor and the cost of that VF. 1347 /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO 1348 /// then this vectorization factor will be selected if vectorization is 1349 /// possible. 1350 VectorizationFactor 1351 selectVectorizationFactor(const ElementCountSet &CandidateVFs); 1352 1353 VectorizationFactor 1354 selectEpilogueVectorizationFactor(const ElementCount MaxVF, 1355 const LoopVectorizationPlanner &LVP); 1356 1357 /// Setup cost-based decisions for user vectorization factor. 1358 /// \return true if the UserVF is a feasible VF to be chosen. 1359 bool selectUserVectorizationFactor(ElementCount UserVF) { 1360 collectUniformsAndScalars(UserVF); 1361 collectInstsToScalarize(UserVF); 1362 return expectedCost(UserVF).first.isValid(); 1363 } 1364 1365 /// \return The size (in bits) of the smallest and widest types in the code 1366 /// that needs to be vectorized. We ignore values that remain scalar such as 1367 /// 64 bit loop indices. 1368 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1369 1370 /// \return The desired interleave count. 1371 /// If interleave count has been specified by metadata it will be returned. 1372 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1373 /// are the selected vectorization factor and the cost of the selected VF. 1374 unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); 1375 1376 /// Memory access instruction may be vectorized in more than one way. 1377 /// Form of instruction after vectorization depends on cost. 1378 /// This function takes cost-based decisions for Load/Store instructions 1379 /// and collects them in a map. This decisions map is used for building 1380 /// the lists of loop-uniform and loop-scalar instructions. 1381 /// The calculated cost is saved with widening decision in order to 1382 /// avoid redundant calculations. 1383 void setCostBasedWideningDecision(ElementCount VF); 1384 1385 /// A struct that represents some properties of the register usage 1386 /// of a loop. 1387 struct RegisterUsage { 1388 /// Holds the number of loop invariant values that are used in the loop. 1389 /// The key is ClassID of target-provided register class. 1390 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1391 /// Holds the maximum number of concurrent live intervals in the loop. 1392 /// The key is ClassID of target-provided register class. 1393 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1394 }; 1395 1396 /// \return Returns information about the register usages of the loop for the 1397 /// given vectorization factors. 1398 SmallVector<RegisterUsage, 8> 1399 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1400 1401 /// Collect values we want to ignore in the cost model. 1402 void collectValuesToIgnore(); 1403 1404 /// Collect all element types in the loop for which widening is needed. 1405 void collectElementTypesForWidening(); 1406 1407 /// Split reductions into those that happen in the loop, and those that happen 1408 /// outside. In loop reductions are collected into InLoopReductionChains. 
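  /// For example, for an integer sum reduction the in-loop form keeps the
  /// accumulator scalar inside the vector loop, roughly
  ///   %sum.next = add i32 %sum, (llvm.vector.reduce.add(<VF x i32> %wide.val))
  /// per unrolled part, instead of adding wide partial sums and reducing the
  /// lanes only after the loop. (Illustrative sketch, not the exact IR that is
  /// emitted.)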
1409 void collectInLoopReductions(); 1410 1411 /// Returns true if we should use strict in-order reductions for the given 1412 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1413 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1414 /// of FP operations. 1415 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1416 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1417 } 1418 1419 /// \returns The smallest bitwidth each instruction can be represented with. 1420 /// The vector equivalents of these instructions should be truncated to this 1421 /// type. 1422 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1423 return MinBWs; 1424 } 1425 1426 /// \returns True if it is more profitable to scalarize instruction \p I for 1427 /// vectorization factor \p VF. 1428 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1429 assert(VF.isVector() && 1430 "Profitable to scalarize relevant only for VF > 1."); 1431 1432 // Cost model is not run in the VPlan-native path - return conservative 1433 // result until this changes. 1434 if (EnableVPlanNativePath) 1435 return false; 1436 1437 auto Scalars = InstsToScalarize.find(VF); 1438 assert(Scalars != InstsToScalarize.end() && 1439 "VF not yet analyzed for scalarization profitability"); 1440 return Scalars->second.find(I) != Scalars->second.end(); 1441 } 1442 1443 /// Returns true if \p I is known to be uniform after vectorization. 1444 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1445 if (VF.isScalar()) 1446 return true; 1447 1448 // Cost model is not run in the VPlan-native path - return conservative 1449 // result until this changes. 1450 if (EnableVPlanNativePath) 1451 return false; 1452 1453 auto UniformsPerVF = Uniforms.find(VF); 1454 assert(UniformsPerVF != Uniforms.end() && 1455 "VF not yet analyzed for uniformity"); 1456 return UniformsPerVF->second.count(I); 1457 } 1458 1459 /// Returns true if \p I is known to be scalar after vectorization. 1460 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1461 if (VF.isScalar()) 1462 return true; 1463 1464 // Cost model is not run in the VPlan-native path - return conservative 1465 // result until this changes. 1466 if (EnableVPlanNativePath) 1467 return false; 1468 1469 auto ScalarsPerVF = Scalars.find(VF); 1470 assert(ScalarsPerVF != Scalars.end() && 1471 "Scalar values are not calculated for VF"); 1472 return ScalarsPerVF->second.count(I); 1473 } 1474 1475 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1476 /// for vectorization factor \p VF. 1477 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1478 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1479 !isProfitableToScalarize(I, VF) && 1480 !isScalarAfterVectorization(I, VF); 1481 } 1482 1483 /// Decision that was taken during cost calculation for memory instruction. 1484 enum InstWidening { 1485 CM_Unknown, 1486 CM_Widen, // For consecutive accesses with stride +1. 1487 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1488 CM_Interleave, 1489 CM_GatherScatter, 1490 CM_Scalarize 1491 }; 1492 1493 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1494 /// instruction \p I and vector width \p VF. 
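  /// For example, a unit-stride access is typically recorded as CM_Widen, a
  /// stride of -1 as CM_Widen_Reverse, a member of an interleave group as
  /// CM_Interleave, and an irregularly-strided access as CM_GatherScatter or
  /// CM_Scalarize, depending on what the target supports.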
1495 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1496 InstructionCost Cost) {
1497 assert(VF.isVector() && "Expected VF >=2");
1498 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1499 }
1500
1501 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1502 /// interleaving group \p Grp and vector width \p VF.
1503 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1504 ElementCount VF, InstWidening W,
1505 InstructionCost Cost) {
1506 assert(VF.isVector() && "Expected VF >=2");
1507 // Broadcast this decision to all instructions inside the group.
1508 // But the cost will be assigned to one instruction only.
1509 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1510 if (auto *I = Grp->getMember(i)) {
1511 if (Grp->getInsertPos() == I)
1512 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1513 else
1514 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1515 }
1516 }
1517 }
1518
1519 /// Return the cost model decision for the given instruction \p I and vector
1520 /// width \p VF. Return CM_Unknown if this instruction did not pass
1521 /// through the cost modeling.
1522 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1523 assert(VF.isVector() && "Expected VF to be a vector VF");
1524 // Cost model is not run in the VPlan-native path - return conservative
1525 // result until this changes.
1526 if (EnableVPlanNativePath)
1527 return CM_GatherScatter;
1528
1529 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1530 auto Itr = WideningDecisions.find(InstOnVF);
1531 if (Itr == WideningDecisions.end())
1532 return CM_Unknown;
1533 return Itr->second.first;
1534 }
1535
1536 /// Return the vectorization cost for the given instruction \p I and vector
1537 /// width \p VF.
1538 InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1539 assert(VF.isVector() && "Expected VF >=2");
1540 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1541 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1542 "The cost is not calculated");
1543 return WideningDecisions[InstOnVF].second;
1544 }
1545
1546 /// Return True if instruction \p I is an optimizable truncate whose operand
1547 /// is an induction variable. Such a truncate will be removed by adding a new
1548 /// induction variable with the destination type.
1549 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1550 // If the instruction is not a truncate, return false.
1551 auto *Trunc = dyn_cast<TruncInst>(I);
1552 if (!Trunc)
1553 return false;
1554
1555 // Get the source and destination types of the truncate.
1556 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1557 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1558
1559 // If the truncate is free for the given types, return false. Replacing a
1560 // free truncate with an induction variable would add an induction variable
1561 // update instruction to each iteration of the loop. We exclude from this
1562 // check the primary induction variable since it will need an update
1563 // instruction regardless.
1564 Value *Op = Trunc->getOperand(0);
1565 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1566 return false;
1567
1568 // If the truncated value is not an induction variable, return false.
1569 return Legal->isInductionPhi(Op);
1570 }
1571
1572 /// Collects the instructions to scalarize for each predicated instruction in
1573 /// the loop.
1574 void collectInstsToScalarize(ElementCount VF);
1575
1576 /// Collect Uniform and Scalar values for the given \p VF.
1577 /// The sets depend on CM decision for Load/Store instructions
1578 /// that may be vectorized as interleave, gather-scatter or scalarized.
1579 void collectUniformsAndScalars(ElementCount VF) {
1580 // Do the analysis once.
1581 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1582 return;
1583 setCostBasedWideningDecision(VF);
1584 collectLoopUniforms(VF);
1585 collectLoopScalars(VF);
1586 }
1587
1588 /// Returns true if the target machine supports masked store operation
1589 /// for the given \p DataType and kind of access to \p Ptr.
1590 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1591 return Legal->isConsecutivePtr(DataType, Ptr) &&
1592 TTI.isLegalMaskedStore(DataType, Alignment);
1593 }
1594
1595 /// Returns true if the target machine supports masked load operation
1596 /// for the given \p DataType and kind of access to \p Ptr.
1597 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1598 return Legal->isConsecutivePtr(DataType, Ptr) &&
1599 TTI.isLegalMaskedLoad(DataType, Alignment);
1600 }
1601
1602 /// Returns true if the target machine can represent \p V as a masked gather
1603 /// or scatter operation.
1604 bool isLegalGatherOrScatter(Value *V) {
1605 bool LI = isa<LoadInst>(V);
1606 bool SI = isa<StoreInst>(V);
1607 if (!LI && !SI)
1608 return false;
1609 auto *Ty = getLoadStoreType(V);
1610 Align Align = getLoadStoreAlignment(V);
1611 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1612 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1613 }
1614
1615 /// Returns true if the target machine supports all of the reduction
1616 /// variables found for the given VF.
1617 bool canVectorizeReductions(ElementCount VF) const {
1618 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1619 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1620 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1621 }));
1622 }
1623
1624 /// Returns true if \p I is an instruction that will be scalarized with
1625 /// predication. Such instructions include conditional stores and
1626 /// instructions that may divide by zero.
1627 /// If a non-zero VF has been calculated, we check if I will be scalarized
1628 /// with predication for that VF.
1629 bool isScalarWithPredication(Instruction *I) const;
1630
1631 // Returns true if \p I is an instruction that will be predicated either
1632 // through scalar predication or masked load/store or masked gather/scatter.
1633 // Superset of instructions that return true for isScalarWithPredication.
1634 bool isPredicatedInst(Instruction *I) {
1635 if (!blockNeedsPredicationForAnyReason(I->getParent()))
1636 return false;
1637 // Loads and stores that need some form of masked operation are predicated
1638 // instructions.
1639 if (isa<LoadInst>(I) || isa<StoreInst>(I))
1640 return Legal->isMaskRequired(I);
1641 return isScalarWithPredication(I);
1642 }
1643
1644 /// Returns true if \p I is a memory instruction with consecutive memory
1645 /// access that can be widened.
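  /// For example, with VF = 4 a consecutive scalar load
  ///   %v = load i32, i32* %gep
  /// can be widened into a single wide load
  ///   %wide = load <4 x i32>, <4 x i32>* %gep.vec
  /// (illustrative IR only; the generated code also accounts for alignment,
  /// masking and reversed accesses).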
1646 bool
1647 memoryInstructionCanBeWidened(Instruction *I,
1648 ElementCount VF = ElementCount::getFixed(1));
1649
1650 /// Returns true if \p I is a memory instruction in an interleaved-group
1651 /// of memory accesses that can be vectorized with wide vector loads/stores
1652 /// and shuffles.
1653 bool
1654 interleavedAccessCanBeWidened(Instruction *I,
1655 ElementCount VF = ElementCount::getFixed(1));
1656
1657 /// Check if \p Instr belongs to any interleaved access group.
1658 bool isAccessInterleaved(Instruction *Instr) {
1659 return InterleaveInfo.isInterleaved(Instr);
1660 }
1661
1662 /// Get the interleaved access group that \p Instr belongs to.
1663 const InterleaveGroup<Instruction> *
1664 getInterleavedAccessGroup(Instruction *Instr) {
1665 return InterleaveInfo.getInterleaveGroup(Instr);
1666 }
1667
1668 /// Returns true if we're required to use a scalar epilogue for at least
1669 /// the final iteration of the original loop.
1670 bool requiresScalarEpilogue(ElementCount VF) const {
1671 if (!isScalarEpilogueAllowed())
1672 return false;
1673 // If we might exit from anywhere but the latch, must run the exiting
1674 // iteration in scalar form.
1675 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1676 return true;
1677 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1678 }
1679
1680 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1681 /// loop hint annotation.
1682 bool isScalarEpilogueAllowed() const {
1683 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1684 }
1685
1686 /// Returns true if all loop blocks should be masked to fold the loop tail.
1687 bool foldTailByMasking() const { return FoldTailByMasking; }
1688
1689 /// Returns true if the instructions in this block require predication
1690 /// for any reason, e.g. because tail folding now requires a predicate
1691 /// or because the block in the original loop was predicated.
1692 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1693 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1694 }
1695
1696 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1697 /// nodes to the chain of instructions representing the reductions. Uses a
1698 /// MapVector to ensure deterministic iteration order.
1699 using ReductionChainMap =
1700 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1701
1702 /// Return the chain of instructions representing an inloop reduction.
1703 const ReductionChainMap &getInLoopReductionChains() const {
1704 return InLoopReductionChains;
1705 }
1706
1707 /// Returns true if the Phi is part of an inloop reduction.
1708 bool isInLoopReduction(PHINode *Phi) const {
1709 return InLoopReductionChains.count(Phi);
1710 }
1711
1712 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1713 /// with factor VF. Return the cost of the instruction, including
1714 /// scalarization overhead if it's needed.
1715 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1716
1717 /// Estimate cost of a call instruction CI if it were vectorized with factor
1718 /// VF. Return the cost of the instruction, including scalarization overhead
1719 /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1720 /// scalarized -
1721 /// i.e. either vector version isn't available, or is too expensive.
1722 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1723 bool &NeedToScalarize) const;
1724
1725 /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1726 /// that of B.
1727 bool isMoreProfitable(const VectorizationFactor &A,
1728 const VectorizationFactor &B) const;
1729
1730 /// Invalidates decisions already taken by the cost model.
1731 void invalidateCostModelingDecisions() {
1732 WideningDecisions.clear();
1733 Uniforms.clear();
1734 Scalars.clear();
1735 }
1736
1737 private:
1738 unsigned NumPredStores = 0;
1739
1740 /// \return An upper bound for the vectorization factors for both
1741 /// fixed and scalable vectorization, where the minimum-known number of
1742 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1743 /// disabled or unsupported, then the scalable part will be equal to
1744 /// ElementCount::getScalable(0).
1745 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1746 ElementCount UserVF);
1747
1748 /// \return the maximized element count based on the target's vector
1749 /// registers and the loop trip-count, but limited to a maximum safe VF.
1750 /// This is a helper function of computeFeasibleMaxVF.
1751 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1752 /// issue that occurred on one of the buildbots which cannot be reproduced
1753 /// without having access to the proprietary compiler (see comments on
1754 /// D98509). The issue is currently under investigation and this workaround
1755 /// will be removed as soon as possible.
1756 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1757 unsigned SmallestType,
1758 unsigned WidestType,
1759 const ElementCount &MaxSafeVF);
1760
1761 /// \return the maximum legal scalable VF, based on the safe max number
1762 /// of elements.
1763 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1764
1765 /// The vectorization cost is a combination of the cost itself and a boolean
1766 /// indicating whether any of the contributing operations will actually
1767 /// operate on vector values after type legalization in the backend. If this
1768 /// latter value is false, then all operations will be scalarized (i.e. no
1769 /// vectorization has actually taken place).
1770 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1771
1772 /// Returns the expected execution cost. The unit of the cost does
1773 /// not matter because we use the 'cost' units to compare different
1774 /// vector widths. The cost that is returned is *not* normalized by
1775 /// the factor width. If \p Invalid is not nullptr, this function
1776 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1777 /// each instruction that has an Invalid cost for the given VF.
1778 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1779 VectorizationCostTy
1780 expectedCost(ElementCount VF,
1781 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1782
1783 /// Returns the execution time cost of an instruction for a given vector
1784 /// width. Vector width of one means scalar.
1785 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1786
1787 /// The cost-computation logic from getInstructionCost which provides
1788 /// the vector type as an output parameter.
1789 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1790 Type *&VectorTy);
1791
1792 /// Return the cost of instructions in an inloop reduction pattern, if I is
1793 /// part of that pattern.
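  /// For example, a chain such as
  ///   %m = mul i32 (sext i8 %a to i32), (sext i8 %b to i32)
  ///   %sum.next = add i32 %sum, %m
  /// feeding an in-loop reduction phi may be costed as a single extending
  /// multiply-accumulate reduction on targets that support it, rather than as
  /// separate extend, multiply and add costs. (Illustrative sketch.)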
1794 Optional<InstructionCost>
1795 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1796 TTI::TargetCostKind CostKind);
1797
1798 /// Calculate vectorization cost of memory instruction \p I.
1799 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1800
1801 /// The cost computation for scalarized memory instruction.
1802 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1803
1804 /// The cost computation for interleaving group of memory instructions.
1805 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1806
1807 /// The cost computation for Gather/Scatter instruction.
1808 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1809
1810 /// The cost computation for widening instruction \p I with consecutive
1811 /// memory access.
1812 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1813
1814 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1815 /// Load: scalar load + broadcast.
1816 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1817 /// element)
1818 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1819
1820 /// Estimate the overhead of scalarizing an instruction. This is a
1821 /// convenience wrapper for the type-based getScalarizationOverhead API.
1822 InstructionCost getScalarizationOverhead(Instruction *I,
1823 ElementCount VF) const;
1824
1825 /// Returns whether the instruction is a load or store and will be emitted
1826 /// as a vector operation.
1827 bool isConsecutiveLoadOrStore(Instruction *I);
1828
1829 /// Returns true if an artificially high cost for emulated masked memrefs
1830 /// should be used.
1831 bool useEmulatedMaskMemRefHack(Instruction *I);
1832
1833 /// Map of scalar integer values to the smallest bitwidth they can be legally
1834 /// represented as. The vector equivalents of these values should be truncated
1835 /// to this type.
1836 MapVector<Instruction *, uint64_t> MinBWs;
1837
1838 /// A type representing the costs for instructions if they were to be
1839 /// scalarized rather than vectorized. The entries are Instruction-Cost
1840 /// pairs.
1841 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1842
1843 /// A set containing all BasicBlocks that are known to be present after
1844 /// vectorization as predicated blocks.
1845 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1846
1847 /// Records whether it is allowed to have the original scalar loop execute at
1848 /// least once. This may be needed as a fallback loop in case runtime
1849 /// aliasing/dependence checks fail, or to handle the tail/remainder
1850 /// iterations when the trip count is unknown or doesn't divide by the VF,
1851 /// or as a peel-loop to handle gaps in interleave-groups.
1852 /// Under optsize and when the trip count is very small we don't allow any
1853 /// iterations to execute in the scalar loop.
1854 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1855
1856 /// All blocks of loop are to be masked to fold tail of scalar iterations.
1857 bool FoldTailByMasking = false;
1858
1859 /// A map holding scalar costs for different vectorization factors. The
1860 /// presence of a cost for an instruction in the mapping indicates that the
1861 /// instruction will be scalarized when vectorizing with the associated
1862 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1863 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1864 1865 /// Holds the instructions known to be uniform after vectorization. 1866 /// The data is collected per VF. 1867 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1868 1869 /// Holds the instructions known to be scalar after vectorization. 1870 /// The data is collected per VF. 1871 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1872 1873 /// Holds the instructions (address computations) that are forced to be 1874 /// scalarized. 1875 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1876 1877 /// PHINodes of the reductions that should be expanded in-loop along with 1878 /// their associated chains of reduction operations, in program order from top 1879 /// (PHI) to bottom 1880 ReductionChainMap InLoopReductionChains; 1881 1882 /// A Map of inloop reduction operations and their immediate chain operand. 1883 /// FIXME: This can be removed once reductions can be costed correctly in 1884 /// vplan. This was added to allow quick lookup to the inloop operations, 1885 /// without having to loop through InLoopReductionChains. 1886 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1887 1888 /// Returns the expected difference in cost from scalarizing the expression 1889 /// feeding a predicated instruction \p PredInst. The instructions to 1890 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1891 /// non-negative return value implies the expression will be scalarized. 1892 /// Currently, only single-use chains are considered for scalarization. 1893 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1894 ElementCount VF); 1895 1896 /// Collect the instructions that are uniform after vectorization. An 1897 /// instruction is uniform if we represent it with a single scalar value in 1898 /// the vectorized loop corresponding to each vector iteration. Examples of 1899 /// uniform instructions include pointer operands of consecutive or 1900 /// interleaved memory accesses. Note that although uniformity implies an 1901 /// instruction will be scalar, the reverse is not true. In general, a 1902 /// scalarized instruction will be represented by VF scalar values in the 1903 /// vectorized loop, each corresponding to an iteration of the original 1904 /// scalar loop. 1905 void collectLoopUniforms(ElementCount VF); 1906 1907 /// Collect the instructions that are scalar after vectorization. An 1908 /// instruction is scalar if it is known to be uniform or will be scalarized 1909 /// during vectorization. Non-uniform scalarized instructions will be 1910 /// represented by VF values in the vectorized loop, each corresponding to an 1911 /// iteration of the original scalar loop. 1912 void collectLoopScalars(ElementCount VF); 1913 1914 /// Keeps cost model vectorization decision and cost for instructions. 1915 /// Right now it is used for memory instructions only. 1916 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1917 std::pair<InstWidening, InstructionCost>>; 1918 1919 DecisionList WideningDecisions; 1920 1921 /// Returns true if \p V is expected to be vectorized and it needs to be 1922 /// extracted. 
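  /// That is, \p V is expected to be a vector value in the vector loop while
  /// one of its users is scalarized, so an extractelement of the demanded
  /// lane is expected to be needed.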
1923 bool needsExtract(Value *V, ElementCount VF) const { 1924 Instruction *I = dyn_cast<Instruction>(V); 1925 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1926 TheLoop->isLoopInvariant(I)) 1927 return false; 1928 1929 // Assume we can vectorize V (and hence we need extraction) if the 1930 // scalars are not computed yet. This can happen, because it is called 1931 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1932 // the scalars are collected. That should be a safe assumption in most 1933 // cases, because we check if the operands have vectorizable types 1934 // beforehand in LoopVectorizationLegality. 1935 return Scalars.find(VF) == Scalars.end() || 1936 !isScalarAfterVectorization(I, VF); 1937 }; 1938 1939 /// Returns a range containing only operands needing to be extracted. 1940 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1941 ElementCount VF) const { 1942 return SmallVector<Value *, 4>(make_filter_range( 1943 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1944 } 1945 1946 /// Determines if we have the infrastructure to vectorize loop \p L and its 1947 /// epilogue, assuming the main loop is vectorized by \p VF. 1948 bool isCandidateForEpilogueVectorization(const Loop &L, 1949 const ElementCount VF) const; 1950 1951 /// Returns true if epilogue vectorization is considered profitable, and 1952 /// false otherwise. 1953 /// \p VF is the vectorization factor chosen for the original loop. 1954 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1955 1956 public: 1957 /// The loop that we evaluate. 1958 Loop *TheLoop; 1959 1960 /// Predicated scalar evolution analysis. 1961 PredicatedScalarEvolution &PSE; 1962 1963 /// Loop Info analysis. 1964 LoopInfo *LI; 1965 1966 /// Vectorization legality. 1967 LoopVectorizationLegality *Legal; 1968 1969 /// Vector target information. 1970 const TargetTransformInfo &TTI; 1971 1972 /// Target Library Info. 1973 const TargetLibraryInfo *TLI; 1974 1975 /// Demanded bits analysis. 1976 DemandedBits *DB; 1977 1978 /// Assumption cache. 1979 AssumptionCache *AC; 1980 1981 /// Interface to emit optimization remarks. 1982 OptimizationRemarkEmitter *ORE; 1983 1984 const Function *TheFunction; 1985 1986 /// Loop Vectorize Hint. 1987 const LoopVectorizeHints *Hints; 1988 1989 /// The interleave access information contains groups of interleaved accesses 1990 /// with the same stride and close to each other. 1991 InterleavedAccessInfo &InterleaveInfo; 1992 1993 /// Values to ignore in the cost model. 1994 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1995 1996 /// Values to ignore in the cost model when VF > 1. 1997 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1998 1999 /// All element types found in the loop. 2000 SmallPtrSet<Type *, 16> ElementTypesInLoop; 2001 2002 /// Profitable vector factors. 2003 SmallVector<VectorizationFactor, 8> ProfitableVFs; 2004 }; 2005 } // end namespace llvm 2006 2007 /// Helper struct to manage generating runtime checks for vectorization. 2008 /// 2009 /// The runtime checks are created up-front in temporary blocks to allow better 2010 /// estimating the cost and un-linked from the existing IR. After deciding to 2011 /// vectorize, the checks are moved back. If deciding not to vectorize, the 2012 /// temporary blocks are completely removed. 2013 class GeneratedRTChecks { 2014 /// Basic block which contains the generated SCEV checks, if any. 
2015 BasicBlock *SCEVCheckBlock = nullptr;
2016
2017 /// The value representing the result of the generated SCEV checks. If it is
2018 /// nullptr, either no SCEV checks have been generated or they have been used.
2019 Value *SCEVCheckCond = nullptr;
2020
2021 /// Basic block which contains the generated memory runtime checks, if any.
2022 BasicBlock *MemCheckBlock = nullptr;
2023
2024 /// The value representing the result of the generated memory runtime checks.
2025 /// If it is nullptr, either no memory runtime checks have been generated or
2026 /// they have been used.
2027 Value *MemRuntimeCheckCond = nullptr;
2028
2029 DominatorTree *DT;
2030 LoopInfo *LI;
2031
2032 SCEVExpander SCEVExp;
2033 SCEVExpander MemCheckExp;
2034
2035 public:
2036 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
2037 const DataLayout &DL)
2038 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
2039 MemCheckExp(SE, DL, "scev.check") {}
2040
2041 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2042 /// accurately estimate the cost of the runtime checks. The blocks are
2043 /// un-linked from the IR and are added back during vector code generation. If
2044 /// there is no vector code generation, the check blocks are removed
2045 /// completely.
2046 void Create(Loop *L, const LoopAccessInfo &LAI,
2047 const SCEVUnionPredicate &UnionPred) {
2048
2049 BasicBlock *LoopHeader = L->getHeader();
2050 BasicBlock *Preheader = L->getLoopPreheader();
2051
2052 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2053 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2054 // may be used by SCEVExpander. The blocks will be un-linked from their
2055 // predecessors and removed from LI & DT at the end of the function.
2056 if (!UnionPred.isAlwaysTrue()) {
2057 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2058 nullptr, "vector.scevcheck");
2059
2060 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2061 &UnionPred, SCEVCheckBlock->getTerminator());
2062 }
2063
2064 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2065 if (RtPtrChecking.Need) {
2066 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2067 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2068 "vector.memcheck");
2069
2070 MemRuntimeCheckCond =
2071 addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2072 RtPtrChecking.getChecks(), MemCheckExp);
2073 assert(MemRuntimeCheckCond &&
2074 "no RT checks generated although RtPtrChecking "
2075 "claimed checks are required");
2076 }
2077
2078 if (!MemCheckBlock && !SCEVCheckBlock)
2079 return;
2080
2081 // Unhook the temporary blocks with the checks, update various places
2082 // accordingly.
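    // Each check block's branch into the loop is moved up into the preheader,
    // the check block itself gets a placeholder 'unreachable' terminator so
    // it stays well-formed while detached, and DT/LI are updated to forget
    // the detached blocks until they are re-linked by emitSCEVChecks /
    // emitMemRuntimeChecks (or deleted in the destructor).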
2083 if (SCEVCheckBlock) 2084 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2085 if (MemCheckBlock) 2086 MemCheckBlock->replaceAllUsesWith(Preheader); 2087 2088 if (SCEVCheckBlock) { 2089 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2090 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2091 Preheader->getTerminator()->eraseFromParent(); 2092 } 2093 if (MemCheckBlock) { 2094 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2095 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2096 Preheader->getTerminator()->eraseFromParent(); 2097 } 2098 2099 DT->changeImmediateDominator(LoopHeader, Preheader); 2100 if (MemCheckBlock) { 2101 DT->eraseNode(MemCheckBlock); 2102 LI->removeBlock(MemCheckBlock); 2103 } 2104 if (SCEVCheckBlock) { 2105 DT->eraseNode(SCEVCheckBlock); 2106 LI->removeBlock(SCEVCheckBlock); 2107 } 2108 } 2109 2110 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2111 /// unused. 2112 ~GeneratedRTChecks() { 2113 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 2114 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 2115 if (!SCEVCheckCond) 2116 SCEVCleaner.markResultUsed(); 2117 2118 if (!MemRuntimeCheckCond) 2119 MemCheckCleaner.markResultUsed(); 2120 2121 if (MemRuntimeCheckCond) { 2122 auto &SE = *MemCheckExp.getSE(); 2123 // Memory runtime check generation creates compares that use expanded 2124 // values. Remove them before running the SCEVExpanderCleaners. 2125 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2126 if (MemCheckExp.isInsertedInstruction(&I)) 2127 continue; 2128 SE.forgetValue(&I); 2129 I.eraseFromParent(); 2130 } 2131 } 2132 MemCheckCleaner.cleanup(); 2133 SCEVCleaner.cleanup(); 2134 2135 if (SCEVCheckCond) 2136 SCEVCheckBlock->eraseFromParent(); 2137 if (MemRuntimeCheckCond) 2138 MemCheckBlock->eraseFromParent(); 2139 } 2140 2141 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2142 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2143 /// depending on the generated condition. 2144 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2145 BasicBlock *LoopVectorPreHeader, 2146 BasicBlock *LoopExitBlock) { 2147 if (!SCEVCheckCond) 2148 return nullptr; 2149 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2150 if (C->isZero()) 2151 return nullptr; 2152 2153 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2154 2155 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2156 // Create new preheader for vector loop. 2157 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2158 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2159 2160 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2161 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2162 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2163 SCEVCheckBlock); 2164 2165 DT->addNewBlock(SCEVCheckBlock, Pred); 2166 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2167 2168 ReplaceInstWithInst( 2169 SCEVCheckBlock->getTerminator(), 2170 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2171 // Mark the check as used, to prevent it from being removed during cleanup. 2172 SCEVCheckCond = nullptr; 2173 return SCEVCheckBlock; 2174 } 2175 2176 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2177 /// the branches to branch to the vector preheader or \p Bypass, depending on 2178 /// the generated condition. 
2179 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2180 BasicBlock *LoopVectorPreHeader) { 2181 // Check if we generated code that checks in runtime if arrays overlap. 2182 if (!MemRuntimeCheckCond) 2183 return nullptr; 2184 2185 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2186 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2187 MemCheckBlock); 2188 2189 DT->addNewBlock(MemCheckBlock, Pred); 2190 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2191 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2192 2193 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2194 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2195 2196 ReplaceInstWithInst( 2197 MemCheckBlock->getTerminator(), 2198 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2199 MemCheckBlock->getTerminator()->setDebugLoc( 2200 Pred->getTerminator()->getDebugLoc()); 2201 2202 // Mark the check as used, to prevent it from being removed during cleanup. 2203 MemRuntimeCheckCond = nullptr; 2204 return MemCheckBlock; 2205 } 2206 }; 2207 2208 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2209 // vectorization. The loop needs to be annotated with #pragma omp simd 2210 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2211 // vector length information is not provided, vectorization is not considered 2212 // explicit. Interleave hints are not allowed either. These limitations will be 2213 // relaxed in the future. 2214 // Please, note that we are currently forced to abuse the pragma 'clang 2215 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2216 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2217 // provides *explicit vectorization hints* (LV can bypass legal checks and 2218 // assume that vectorization is legal). However, both hints are implemented 2219 // using the same metadata (llvm.loop.vectorize, processed by 2220 // LoopVectorizeHints). This will be fixed in the future when the native IR 2221 // representation for pragma 'omp simd' is introduced. 2222 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2223 OptimizationRemarkEmitter *ORE) { 2224 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2225 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2226 2227 // Only outer loops with an explicit vectorization hint are supported. 2228 // Unannotated outer loops are ignored. 2229 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2230 return false; 2231 2232 Function *Fn = OuterLp->getHeader()->getParent(); 2233 if (!Hints.allowVectorization(Fn, OuterLp, 2234 true /*VectorizeOnlyWhenForced*/)) { 2235 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2236 return false; 2237 } 2238 2239 if (Hints.getInterleave() > 1) { 2240 // TODO: Interleave support is future work. 2241 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2242 "outer loops.\n"); 2243 Hints.emitRemarkWithHints(); 2244 return false; 2245 } 2246 2247 return true; 2248 } 2249 2250 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2251 OptimizationRemarkEmitter *ORE, 2252 SmallVectorImpl<Loop *> &V) { 2253 // Collect inner loops and outer loops without irreducible control flow. For 2254 // now, only collect outer loops that have explicit vectorization hints. If we 2255 // are stress testing the VPlan H-CFG construction, we collect the outermost 2256 // loop of every loop nest. 
2257 if (L.isInnermost() || VPlanBuildStressTest || 2258 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2259 LoopBlocksRPO RPOT(&L); 2260 RPOT.perform(LI); 2261 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2262 V.push_back(&L); 2263 // TODO: Collect inner loops inside marked outer loops in case 2264 // vectorization fails for the outer loop. Do not invoke 2265 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2266 // already known to be reducible. We can use an inherited attribute for 2267 // that. 2268 return; 2269 } 2270 } 2271 for (Loop *InnerL : L) 2272 collectSupportedLoops(*InnerL, LI, ORE, V); 2273 } 2274 2275 namespace { 2276 2277 /// The LoopVectorize Pass. 2278 struct LoopVectorize : public FunctionPass { 2279 /// Pass identification, replacement for typeid 2280 static char ID; 2281 2282 LoopVectorizePass Impl; 2283 2284 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2285 bool VectorizeOnlyWhenForced = false) 2286 : FunctionPass(ID), 2287 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2288 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2289 } 2290 2291 bool runOnFunction(Function &F) override { 2292 if (skipFunction(F)) 2293 return false; 2294 2295 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2296 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2297 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2298 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2299 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2300 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2301 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2302 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2303 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2304 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2305 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2306 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2307 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2308 2309 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2310 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2311 2312 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2313 GetLAA, *ORE, PSI).MadeAnyChange; 2314 } 2315 2316 void getAnalysisUsage(AnalysisUsage &AU) const override { 2317 AU.addRequired<AssumptionCacheTracker>(); 2318 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2319 AU.addRequired<DominatorTreeWrapperPass>(); 2320 AU.addRequired<LoopInfoWrapperPass>(); 2321 AU.addRequired<ScalarEvolutionWrapperPass>(); 2322 AU.addRequired<TargetTransformInfoWrapperPass>(); 2323 AU.addRequired<AAResultsWrapperPass>(); 2324 AU.addRequired<LoopAccessLegacyAnalysis>(); 2325 AU.addRequired<DemandedBitsWrapperPass>(); 2326 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2327 AU.addRequired<InjectTLIMappingsLegacy>(); 2328 2329 // We currently do not preserve loopinfo/dominator analyses with outer loop 2330 // vectorization. Until this is addressed, mark these analyses as preserved 2331 // only for non-VPlan-native path. 2332 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
2333 if (!EnableVPlanNativePath) { 2334 AU.addPreserved<LoopInfoWrapperPass>(); 2335 AU.addPreserved<DominatorTreeWrapperPass>(); 2336 } 2337 2338 AU.addPreserved<BasicAAWrapperPass>(); 2339 AU.addPreserved<GlobalsAAWrapperPass>(); 2340 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2341 } 2342 }; 2343 2344 } // end anonymous namespace 2345 2346 //===----------------------------------------------------------------------===// 2347 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2348 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2349 //===----------------------------------------------------------------------===// 2350 2351 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2352 // We need to place the broadcast of invariant variables outside the loop, 2353 // but only if it's proven safe to do so. Else, broadcast will be inside 2354 // vector loop body. 2355 Instruction *Instr = dyn_cast<Instruction>(V); 2356 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2357 (!Instr || 2358 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2359 // Place the code for broadcasting invariant variables in the new preheader. 2360 IRBuilder<>::InsertPointGuard Guard(Builder); 2361 if (SafeToHoist) 2362 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2363 2364 // Broadcast the scalar into all locations in the vector. 2365 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2366 2367 return Shuf; 2368 } 2369 2370 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2371 const InductionDescriptor &II, Value *Step, Value *Start, 2372 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2373 VPTransformState &State) { 2374 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2375 "Expected either an induction phi-node or a truncate of it!"); 2376 2377 // Construct the initial value of the vector IV in the vector loop preheader 2378 auto CurrIP = Builder.saveIP(); 2379 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2380 if (isa<TruncInst>(EntryVal)) { 2381 assert(Start->getType()->isIntegerTy() && 2382 "Truncation requires an integer type"); 2383 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2384 Step = Builder.CreateTrunc(Step, TruncType); 2385 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2386 } 2387 2388 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2389 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2390 Value *SteppedStart = 2391 getStepVector(SplatStart, Zero, Step, II.getInductionOpcode()); 2392 2393 // We create vector phi nodes for both integer and floating-point induction 2394 // variables. Here, we determine the kind of arithmetic we will perform. 2395 Instruction::BinaryOps AddOp; 2396 Instruction::BinaryOps MulOp; 2397 if (Step->getType()->isIntegerTy()) { 2398 AddOp = Instruction::Add; 2399 MulOp = Instruction::Mul; 2400 } else { 2401 AddOp = II.getInductionOpcode(); 2402 MulOp = Instruction::FMul; 2403 } 2404 2405 // Multiply the vectorization factor by the step using integer or 2406 // floating-point arithmetic as appropriate. 2407 Type *StepType = Step->getType(); 2408 Value *RuntimeVF; 2409 if (Step->getType()->isFloatingPointTy()) 2410 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, VF); 2411 else 2412 RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2413 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2414 2415 // Create a vector splat to use in the induction update. 
2416 // 2417 // FIXME: If the step is non-constant, we create the vector splat with 2418 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2419 // handle a constant vector splat. 2420 Value *SplatVF = isa<Constant>(Mul) 2421 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2422 : Builder.CreateVectorSplat(VF, Mul); 2423 Builder.restoreIP(CurrIP); 2424 2425 // We may need to add the step a number of times, depending on the unroll 2426 // factor. The last of those goes into the PHI. 2427 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2428 &*LoopVectorBody->getFirstInsertionPt()); 2429 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2430 Instruction *LastInduction = VecInd; 2431 for (unsigned Part = 0; Part < UF; ++Part) { 2432 State.set(Def, LastInduction, Part); 2433 2434 if (isa<TruncInst>(EntryVal)) 2435 addMetadata(LastInduction, EntryVal); 2436 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2437 State, Part); 2438 2439 LastInduction = cast<Instruction>( 2440 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2441 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2442 } 2443 2444 // Move the last step to the end of the latch block. This ensures consistent 2445 // placement of all induction updates. 2446 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2447 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2448 auto *ICmp = cast<Instruction>(Br->getCondition()); 2449 LastInduction->moveBefore(ICmp); 2450 LastInduction->setName("vec.ind.next"); 2451 2452 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2453 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2454 } 2455 2456 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2457 return Cost->isScalarAfterVectorization(I, VF) || 2458 Cost->isProfitableToScalarize(I, VF); 2459 } 2460 2461 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2462 if (shouldScalarizeInstruction(IV)) 2463 return true; 2464 auto isScalarInst = [&](User *U) -> bool { 2465 auto *I = cast<Instruction>(U); 2466 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2467 }; 2468 return llvm::any_of(IV->users(), isScalarInst); 2469 } 2470 2471 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2472 const InductionDescriptor &ID, const Instruction *EntryVal, 2473 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2474 unsigned Part, unsigned Lane) { 2475 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2476 "Expected either an induction phi-node or a truncate of it!"); 2477 2478 // This induction variable is not the phi from the original loop but the 2479 // newly-created IV based on the proof that casted Phi is equal to the 2480 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2481 // re-uses the same InductionDescriptor that original IV uses but we don't 2482 // have to do any recording in this case - that is done when original IV is 2483 // processed. 2484 if (isa<TruncInst>(EntryVal)) 2485 return; 2486 2487 if (!CastDef) { 2488 assert(ID.getCastInsts().empty() && 2489 "there are casts for ID, but no CastDef"); 2490 return; 2491 } 2492 assert(!ID.getCastInsts().empty() && 2493 "there is a CastDef, but no casts for ID"); 2494 // Only the first Cast instruction in the Casts vector is of interest. 2495 // The rest of the Casts (if exist) have no uses outside the 2496 // induction update chain itself. 
2497 if (Lane < UINT_MAX) 2498 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2499 else 2500 State.set(CastDef, VectorLoopVal, Part); 2501 } 2502 2503 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2504 TruncInst *Trunc, VPValue *Def, 2505 VPValue *CastDef, 2506 VPTransformState &State) { 2507 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2508 "Primary induction variable must have an integer type"); 2509 2510 auto II = Legal->getInductionVars().find(IV); 2511 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2512 2513 auto ID = II->second; 2514 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2515 2516 // The value from the original loop to which we are mapping the new induction 2517 // variable. 2518 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2519 2520 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2521 2522 // Generate code for the induction step. Note that induction steps are 2523 // required to be loop-invariant 2524 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2525 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2526 "Induction step should be loop invariant"); 2527 if (PSE.getSE()->isSCEVable(IV->getType())) { 2528 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2529 return Exp.expandCodeFor(Step, Step->getType(), 2530 LoopVectorPreHeader->getTerminator()); 2531 } 2532 return cast<SCEVUnknown>(Step)->getValue(); 2533 }; 2534 2535 // The scalar value to broadcast. This is derived from the canonical 2536 // induction variable. If a truncation type is given, truncate the canonical 2537 // induction variable and step. Otherwise, derive these values from the 2538 // induction descriptor. 2539 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2540 Value *ScalarIV = Induction; 2541 if (IV != OldInduction) { 2542 ScalarIV = IV->getType()->isIntegerTy() 2543 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2544 : Builder.CreateCast(Instruction::SIToFP, Induction, 2545 IV->getType()); 2546 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2547 ScalarIV->setName("offset.idx"); 2548 } 2549 if (Trunc) { 2550 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2551 assert(Step->getType()->isIntegerTy() && 2552 "Truncation requires an integer step"); 2553 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2554 Step = Builder.CreateTrunc(Step, TruncType); 2555 } 2556 return ScalarIV; 2557 }; 2558 2559 // Create the vector values from the scalar IV, in the absence of creating a 2560 // vector IV. 2561 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2562 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2563 for (unsigned Part = 0; Part < UF; ++Part) { 2564 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2565 Value *StartIdx; 2566 if (Step->getType()->isFloatingPointTy()) 2567 StartIdx = getRuntimeVFAsFloat(Builder, Step->getType(), VF * Part); 2568 else 2569 StartIdx = getRuntimeVF(Builder, Step->getType(), VF * Part); 2570 2571 Value *EntryPart = 2572 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode()); 2573 State.set(Def, EntryPart, Part); 2574 if (Trunc) 2575 addMetadata(EntryPart, Trunc); 2576 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2577 State, Part); 2578 } 2579 }; 2580 2581 // Fast-math-flags propagate from the original induction instruction. 
2582 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2583 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2584 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2585 2586 // Now do the actual transformations, and start with creating the step value. 2587 Value *Step = CreateStepValue(ID.getStep()); 2588 if (VF.isZero() || VF.isScalar()) { 2589 Value *ScalarIV = CreateScalarIV(Step); 2590 CreateSplatIV(ScalarIV, Step); 2591 return; 2592 } 2593 2594 // Determine if we want a scalar version of the induction variable. This is 2595 // true if the induction variable itself is not widened, or if it has at 2596 // least one user in the loop that is not widened. 2597 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2598 if (!NeedsScalarIV) { 2599 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2600 State); 2601 return; 2602 } 2603 2604 // Try to create a new independent vector induction variable. If we can't 2605 // create the phi node, we will splat the scalar induction variable in each 2606 // loop iteration. 2607 if (!shouldScalarizeInstruction(EntryVal)) { 2608 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2609 State); 2610 Value *ScalarIV = CreateScalarIV(Step); 2611 // Create scalar steps that can be used by instructions we will later 2612 // scalarize. Note that the addition of the scalar steps will not increase 2613 // the number of instructions in the loop in the common case prior to 2614 // InstCombine. We will be trading one vector extract for each scalar step. 2615 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2616 return; 2617 } 2618 2619 // All IV users are scalar instructions, so only emit a scalar IV, not a 2620 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2621 // predicate used by the masked loads/stores. 2622 Value *ScalarIV = CreateScalarIV(Step); 2623 if (!Cost->isScalarEpilogueAllowed()) 2624 CreateSplatIV(ScalarIV, Step); 2625 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2626 } 2627 2628 Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx, 2629 Value *Step, 2630 Instruction::BinaryOps BinOp) { 2631 // Create and check the types. 2632 auto *ValVTy = cast<VectorType>(Val->getType()); 2633 ElementCount VLen = ValVTy->getElementCount(); 2634 2635 Type *STy = Val->getType()->getScalarType(); 2636 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2637 "Induction Step must be an integer or FP"); 2638 assert(Step->getType() == STy && "Step has wrong type"); 2639 2640 SmallVector<Constant *, 8> Indices; 2641 2642 // Create a vector of consecutive numbers from zero to VF. 2643 VectorType *InitVecValVTy = ValVTy; 2644 Type *InitVecValSTy = STy; 2645 if (STy->isFloatingPointTy()) { 2646 InitVecValSTy = 2647 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2648 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2649 } 2650 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2651 2652 // Splat the StartIdx 2653 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2654 2655 if (STy->isIntegerTy()) { 2656 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2657 Step = Builder.CreateVectorSplat(VLen, Step); 2658 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2659 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2660 // which can be found from the original scalar operations. 
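  // For example, with Val = splat(%start), StartIdx = 0 and Step = 1 the code
  // below computes <%start, %start + 1, ..., %start + VF-1>.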
2661 Step = Builder.CreateMul(InitVec, Step); 2662 return Builder.CreateAdd(Val, Step, "induction"); 2663 } 2664 2665 // Floating point induction. 2666 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2667 "Binary Opcode should be specified for FP induction"); 2668 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2669 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2670 2671 Step = Builder.CreateVectorSplat(VLen, Step); 2672 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2673 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2674 } 2675 2676 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2677 Instruction *EntryVal, 2678 const InductionDescriptor &ID, 2679 VPValue *Def, VPValue *CastDef, 2680 VPTransformState &State) { 2681 // We shouldn't have to build scalar steps if we aren't vectorizing. 2682 assert(VF.isVector() && "VF should be greater than one"); 2683 // Get the value type and ensure it and the step have the same integer type. 2684 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2685 assert(ScalarIVTy == Step->getType() && 2686 "Val and Step should have the same type"); 2687 2688 // We build scalar steps for both integer and floating-point induction 2689 // variables. Here, we determine the kind of arithmetic we will perform. 2690 Instruction::BinaryOps AddOp; 2691 Instruction::BinaryOps MulOp; 2692 if (ScalarIVTy->isIntegerTy()) { 2693 AddOp = Instruction::Add; 2694 MulOp = Instruction::Mul; 2695 } else { 2696 AddOp = ID.getInductionOpcode(); 2697 MulOp = Instruction::FMul; 2698 } 2699 2700 // Determine the number of scalars we need to generate for each unroll 2701 // iteration. If EntryVal is uniform, we only need to generate the first 2702 // lane. Otherwise, we generate all VF values. 2703 bool IsUniform = 2704 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2705 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2706 // Compute the scalar steps and save the results in State. 2707 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2708 ScalarIVTy->getScalarSizeInBits()); 2709 Type *VecIVTy = nullptr; 2710 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2711 if (!IsUniform && VF.isScalable()) { 2712 VecIVTy = VectorType::get(ScalarIVTy, VF); 2713 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2714 SplatStep = Builder.CreateVectorSplat(VF, Step); 2715 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2716 } 2717 2718 for (unsigned Part = 0; Part < UF; ++Part) { 2719 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, VF, Part); 2720 2721 if (!IsUniform && VF.isScalable()) { 2722 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2723 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2724 if (ScalarIVTy->isFloatingPointTy()) 2725 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2726 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2727 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2728 State.set(Def, Add, Part); 2729 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2730 Part); 2731 // It's useful to record the lane values too for the known minimum number 2732 // of elements so we do those below. This improves the code quality when 2733 // trying to extract the first element, for example. 
2734 } 2735 2736 if (ScalarIVTy->isFloatingPointTy()) 2737 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2738 2739 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2740 Value *StartIdx = Builder.CreateBinOp( 2741 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2742 // The step returned by `createStepForVF` is a runtime-evaluated value 2743 // when VF is scalable. Otherwise, it should be folded into a Constant. 2744 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2745 "Expected StartIdx to be folded to a constant when VF is not " 2746 "scalable"); 2747 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2748 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2749 State.set(Def, Add, VPIteration(Part, Lane)); 2750 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2751 Part, Lane); 2752 } 2753 } 2754 } 2755 2756 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2757 const VPIteration &Instance, 2758 VPTransformState &State) { 2759 Value *ScalarInst = State.get(Def, Instance); 2760 Value *VectorValue = State.get(Def, Instance.Part); 2761 VectorValue = Builder.CreateInsertElement( 2762 VectorValue, ScalarInst, 2763 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2764 State.set(Def, VectorValue, Instance.Part); 2765 } 2766 2767 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2768 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2769 return Builder.CreateVectorReverse(Vec, "reverse"); 2770 } 2771 2772 // Return whether we allow using masked interleave-groups (for dealing with 2773 // strided loads/stores that reside in predicated blocks, or for dealing 2774 // with gaps). 2775 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2776 // If an override option has been passed in for interleaved accesses, use it. 2777 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2778 return EnableMaskedInterleavedMemAccesses; 2779 2780 return TTI.enableMaskedInterleavedAccessVectorization(); 2781 } 2782 2783 // Try to vectorize the interleave group that \p Instr belongs to. 2784 // 2785 // E.g. Translate following interleaved load group (factor = 3): 2786 // for (i = 0; i < N; i+=3) { 2787 // R = Pic[i]; // Member of index 0 2788 // G = Pic[i+1]; // Member of index 1 2789 // B = Pic[i+2]; // Member of index 2 2790 // ... // do something to R, G, B 2791 // } 2792 // To: 2793 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2794 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2795 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2796 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2797 // 2798 // Or translate following interleaved store group (factor = 3): 2799 // for (i = 0; i < N; i+=3) { 2800 // ... 
do something to R, G, B
2801 // Pic[i] = R; // Member of index 0
2802 // Pic[i+1] = G; // Member of index 1
2803 // Pic[i+2] = B; // Member of index 2
2804 // }
2805 // To:
2806 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2807 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2808 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2809 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2810 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2811 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2812 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2813 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2814 VPValue *BlockInMask) {
2815 Instruction *Instr = Group->getInsertPos();
2816 const DataLayout &DL = Instr->getModule()->getDataLayout();
2817
2818 // Prepare for the vector type of the interleaved load/store.
2819 Type *ScalarTy = getLoadStoreType(Instr);
2820 unsigned InterleaveFactor = Group->getFactor();
2821 assert(!VF.isScalable() && "scalable vectors not yet supported.");
2822 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2823
2824 // Prepare for the new pointers.
2825 SmallVector<Value *, 2> AddrParts;
2826 unsigned Index = Group->getIndex(Instr);
2827
2828 // TODO: extend the masked interleaved-group support to reversed access.
2829 assert((!BlockInMask || !Group->isReverse()) &&
2830 "Reversed masked interleave-group not supported.");
2831
2832 // If the group is reversed, adjust the index to refer to the last vector lane
2833 // instead of the first. We adjust the index from the first vector lane,
2834 // rather than directly getting the pointer for lane VF - 1, because the
2835 // pointer operand of the interleaved access is supposed to be uniform. For
2836 // uniform instructions, we're only required to generate a value for the
2837 // first vector lane in each unroll iteration.
2838 if (Group->isReverse())
2839 Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2840
2841 for (unsigned Part = 0; Part < UF; Part++) {
2842 Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2843 setDebugLocFromInst(AddrPart);
2844
2845 // Note that the current instruction could be at any index in the group; we
2846 // need to adjust the address to that of the member at index 0.
2847 //
2848 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2849 // b = A[i]; // Member of index 0
2850 // The current pointer points to A[i+1]; adjust it to A[i].
2851 //
2852 // E.g. A[i+1] = a; // Member of index 1
2853 // A[i] = b; // Member of index 0
2854 // A[i+2] = c; // Member of index 2 (Current instruction)
2855 // The current pointer points to A[i+2]; adjust it to A[i].
2856
2857 bool InBounds = false;
2858 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2859 InBounds = gep->isInBounds();
2860 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2861 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2862
2863 // Cast to the vector pointer type.
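// For example, with VF = 4, an interleave factor of 3 and i32 elements, each
// per-part address of type i32* is cast to <12 x i32>* so that the whole group
// can be accessed with a single wide load or store.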
2864 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2865 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2866 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2867 } 2868 2869 setDebugLocFromInst(Instr); 2870 Value *PoisonVec = PoisonValue::get(VecTy); 2871 2872 Value *MaskForGaps = nullptr; 2873 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2874 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2875 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2876 } 2877 2878 // Vectorize the interleaved load group. 2879 if (isa<LoadInst>(Instr)) { 2880 // For each unroll part, create a wide load for the group. 2881 SmallVector<Value *, 2> NewLoads; 2882 for (unsigned Part = 0; Part < UF; Part++) { 2883 Instruction *NewLoad; 2884 if (BlockInMask || MaskForGaps) { 2885 assert(useMaskedInterleavedAccesses(*TTI) && 2886 "masked interleaved groups are not allowed."); 2887 Value *GroupMask = MaskForGaps; 2888 if (BlockInMask) { 2889 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2890 Value *ShuffledMask = Builder.CreateShuffleVector( 2891 BlockInMaskPart, 2892 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2893 "interleaved.mask"); 2894 GroupMask = MaskForGaps 2895 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2896 MaskForGaps) 2897 : ShuffledMask; 2898 } 2899 NewLoad = 2900 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2901 GroupMask, PoisonVec, "wide.masked.vec"); 2902 } 2903 else 2904 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2905 Group->getAlign(), "wide.vec"); 2906 Group->addMetadata(NewLoad); 2907 NewLoads.push_back(NewLoad); 2908 } 2909 2910 // For each member in the group, shuffle out the appropriate data from the 2911 // wide loads. 2912 unsigned J = 0; 2913 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2914 Instruction *Member = Group->getMember(I); 2915 2916 // Skip the gaps in the group. 2917 if (!Member) 2918 continue; 2919 2920 auto StrideMask = 2921 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2922 for (unsigned Part = 0; Part < UF; Part++) { 2923 Value *StridedVec = Builder.CreateShuffleVector( 2924 NewLoads[Part], StrideMask, "strided.vec"); 2925 2926 // If this member has different type, cast the result type. 2927 if (Member->getType() != ScalarTy) { 2928 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2929 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2930 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2931 } 2932 2933 if (Group->isReverse()) 2934 StridedVec = reverseVector(StridedVec); 2935 2936 State.set(VPDefs[J], StridedVec, Part); 2937 } 2938 ++J; 2939 } 2940 return; 2941 } 2942 2943 // The sub vector type for current instruction. 2944 auto *SubVT = VectorType::get(ScalarTy, VF); 2945 2946 // Vectorize the interleaved store group. 2947 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2948 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2949 "masked interleaved groups are not allowed."); 2950 assert((!MaskForGaps || !VF.isScalable()) && 2951 "masking gaps for scalable vectors is not yet supported."); 2952 for (unsigned Part = 0; Part < UF; Part++) { 2953 // Collect the stored vector from each member. 
2954 SmallVector<Value *, 4> StoredVecs; 2955 for (unsigned i = 0; i < InterleaveFactor; i++) { 2956 assert((Group->getMember(i) || MaskForGaps) && 2957 "Fail to get a member from an interleaved store group"); 2958 Instruction *Member = Group->getMember(i); 2959 2960 // Skip the gaps in the group. 2961 if (!Member) { 2962 Value *Undef = PoisonValue::get(SubVT); 2963 StoredVecs.push_back(Undef); 2964 continue; 2965 } 2966 2967 Value *StoredVec = State.get(StoredValues[i], Part); 2968 2969 if (Group->isReverse()) 2970 StoredVec = reverseVector(StoredVec); 2971 2972 // If this member has different type, cast it to a unified type. 2973 2974 if (StoredVec->getType() != SubVT) 2975 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2976 2977 StoredVecs.push_back(StoredVec); 2978 } 2979 2980 // Concatenate all vectors into a wide vector. 2981 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2982 2983 // Interleave the elements in the wide vector. 2984 Value *IVec = Builder.CreateShuffleVector( 2985 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2986 "interleaved.vec"); 2987 2988 Instruction *NewStoreInstr; 2989 if (BlockInMask || MaskForGaps) { 2990 Value *GroupMask = MaskForGaps; 2991 if (BlockInMask) { 2992 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2993 Value *ShuffledMask = Builder.CreateShuffleVector( 2994 BlockInMaskPart, 2995 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2996 "interleaved.mask"); 2997 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2998 ShuffledMask, MaskForGaps) 2999 : ShuffledMask; 3000 } 3001 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 3002 Group->getAlign(), GroupMask); 3003 } else 3004 NewStoreInstr = 3005 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 3006 3007 Group->addMetadata(NewStoreInstr); 3008 } 3009 } 3010 3011 void InnerLoopVectorizer::vectorizeMemoryInstruction( 3012 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 3013 VPValue *StoredValue, VPValue *BlockInMask, bool ConsecutiveStride, 3014 bool Reverse) { 3015 // Attempt to issue a wide load. 3016 LoadInst *LI = dyn_cast<LoadInst>(Instr); 3017 StoreInst *SI = dyn_cast<StoreInst>(Instr); 3018 3019 assert((LI || SI) && "Invalid Load/Store instruction"); 3020 assert((!SI || StoredValue) && "No stored value provided for widened store"); 3021 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 3022 3023 Type *ScalarDataTy = getLoadStoreType(Instr); 3024 3025 auto *DataTy = VectorType::get(ScalarDataTy, VF); 3026 const Align Alignment = getLoadStoreAlignment(Instr); 3027 bool CreateGatherScatter = !ConsecutiveStride; 3028 3029 VectorParts BlockInMaskParts(UF); 3030 bool isMaskRequired = BlockInMask; 3031 if (isMaskRequired) 3032 for (unsigned Part = 0; Part < UF; ++Part) 3033 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 3034 3035 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 3036 // Calculate the pointer for the specific unroll-part. 3037 GetElementPtrInst *PartPtr = nullptr; 3038 3039 bool InBounds = false; 3040 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 3041 InBounds = gep->isInBounds(); 3042 if (Reverse) { 3043 // If the address is consecutive but reversed, then the 3044 // wide store needs to start at the last vector element. 
3045 // RunTimeVF = VScale * VF.getKnownMinValue() 3046 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 3047 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 3048 // NumElt = -Part * RunTimeVF 3049 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 3050 // LastLane = 1 - RunTimeVF 3051 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 3052 PartPtr = 3053 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 3054 PartPtr->setIsInBounds(InBounds); 3055 PartPtr = cast<GetElementPtrInst>( 3056 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 3057 PartPtr->setIsInBounds(InBounds); 3058 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 3059 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 3060 } else { 3061 Value *Increment = 3062 createStepForVF(Builder, Builder.getInt32Ty(), VF, Part); 3063 PartPtr = cast<GetElementPtrInst>( 3064 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 3065 PartPtr->setIsInBounds(InBounds); 3066 } 3067 3068 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 3069 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3070 }; 3071 3072 // Handle Stores: 3073 if (SI) { 3074 setDebugLocFromInst(SI); 3075 3076 for (unsigned Part = 0; Part < UF; ++Part) { 3077 Instruction *NewSI = nullptr; 3078 Value *StoredVal = State.get(StoredValue, Part); 3079 if (CreateGatherScatter) { 3080 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3081 Value *VectorGep = State.get(Addr, Part); 3082 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 3083 MaskPart); 3084 } else { 3085 if (Reverse) { 3086 // If we store to reverse consecutive memory locations, then we need 3087 // to reverse the order of elements in the stored value. 3088 StoredVal = reverseVector(StoredVal); 3089 // We don't want to update the value in the map as it might be used in 3090 // another expression. So don't call resetVectorValue(StoredVal). 3091 } 3092 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3093 if (isMaskRequired) 3094 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3095 BlockInMaskParts[Part]); 3096 else 3097 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3098 } 3099 addMetadata(NewSI, SI); 3100 } 3101 return; 3102 } 3103 3104 // Handle loads. 3105 assert(LI && "Must have a load instruction"); 3106 setDebugLocFromInst(LI); 3107 for (unsigned Part = 0; Part < UF; ++Part) { 3108 Value *NewLI; 3109 if (CreateGatherScatter) { 3110 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3111 Value *VectorGep = State.get(Addr, Part); 3112 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 3113 nullptr, "wide.masked.gather"); 3114 addMetadata(NewLI, LI); 3115 } else { 3116 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3117 if (isMaskRequired) 3118 NewLI = Builder.CreateMaskedLoad( 3119 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 3120 PoisonValue::get(DataTy), "wide.masked.load"); 3121 else 3122 NewLI = 3123 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 3124 3125 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
3126 addMetadata(NewLI, LI); 3127 if (Reverse) 3128 NewLI = reverseVector(NewLI); 3129 } 3130 3131 State.set(Def, NewLI, Part); 3132 } 3133 } 3134 3135 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 3136 VPReplicateRecipe *RepRecipe, 3137 const VPIteration &Instance, 3138 bool IfPredicateInstr, 3139 VPTransformState &State) { 3140 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3141 3142 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3143 // the first lane and part. 3144 if (isa<NoAliasScopeDeclInst>(Instr)) 3145 if (!Instance.isFirstIteration()) 3146 return; 3147 3148 setDebugLocFromInst(Instr); 3149 3150 // Does this instruction return a value ? 3151 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3152 3153 Instruction *Cloned = Instr->clone(); 3154 if (!IsVoidRetTy) 3155 Cloned->setName(Instr->getName() + ".cloned"); 3156 3157 // If the scalarized instruction contributes to the address computation of a 3158 // widen masked load/store which was in a basic block that needed predication 3159 // and is not predicated after vectorization, we can't propagate 3160 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 3161 // instruction could feed a poison value to the base address of the widen 3162 // load/store. 3163 if (State.MayGeneratePoisonRecipes.count(RepRecipe) > 0) 3164 Cloned->dropPoisonGeneratingFlags(); 3165 3166 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3167 Builder.GetInsertPoint()); 3168 // Replace the operands of the cloned instructions with their scalar 3169 // equivalents in the new loop. 3170 for (unsigned op = 0, e = RepRecipe->getNumOperands(); op != e; ++op) { 3171 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 3172 auto InputInstance = Instance; 3173 if (!Operand || !OrigLoop->contains(Operand) || 3174 (Cost->isUniformAfterVectorization(Operand, State.VF))) 3175 InputInstance.Lane = VPLane::getFirstLane(); 3176 auto *NewOp = State.get(RepRecipe->getOperand(op), InputInstance); 3177 Cloned->setOperand(op, NewOp); 3178 } 3179 addNewMetadata(Cloned, Instr); 3180 3181 // Place the cloned scalar in the new loop. 3182 Builder.Insert(Cloned); 3183 3184 State.set(RepRecipe, Cloned, Instance); 3185 3186 // If we just cloned a new assumption, add it the assumption cache. 3187 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3188 AC->registerAssumption(II); 3189 3190 // End if-block. 3191 if (IfPredicateInstr) 3192 PredicatedInstructions.push_back(Cloned); 3193 } 3194 3195 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3196 Value *End, Value *Step, 3197 Instruction *DL) { 3198 BasicBlock *Header = L->getHeader(); 3199 BasicBlock *Latch = L->getLoopLatch(); 3200 // As we're just creating this loop, it's possible no latch exists 3201 // yet. If so, use the header as this will be a single block loop. 3202 if (!Latch) 3203 Latch = Header; 3204 3205 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3206 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3207 setDebugLocFromInst(OldInst, &B); 3208 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3209 3210 B.SetInsertPoint(Latch->getTerminator()); 3211 setDebugLocFromInst(OldInst, &B); 3212 3213 // Create i+1 and fill the PHINode. 3214 // 3215 // If the tail is not folded, we know that End - Start >= Step (either 3216 // statically or through the minimum iteration checks). We also know that both 3217 // Start % Step == 0 and End % Step == 0. 
We exit the vector loop if %IV + 3218 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3219 // overflows and we can mark the induction increment as NUW. 3220 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3221 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3222 Induction->addIncoming(Start, L->getLoopPreheader()); 3223 Induction->addIncoming(Next, Latch); 3224 // Create the compare. 3225 Value *ICmp = B.CreateICmpEQ(Next, End); 3226 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3227 3228 // Now we have two terminators. Remove the old one from the block. 3229 Latch->getTerminator()->eraseFromParent(); 3230 3231 return Induction; 3232 } 3233 3234 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3235 if (TripCount) 3236 return TripCount; 3237 3238 assert(L && "Create Trip Count for null loop."); 3239 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3240 // Find the loop boundaries. 3241 ScalarEvolution *SE = PSE.getSE(); 3242 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3243 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3244 "Invalid loop count"); 3245 3246 Type *IdxTy = Legal->getWidestInductionType(); 3247 assert(IdxTy && "No type for induction"); 3248 3249 // The exit count might have the type of i64 while the phi is i32. This can 3250 // happen if we have an induction variable that is sign extended before the 3251 // compare. The only way that we get a backedge taken count is that the 3252 // induction variable was signed and as such will not overflow. In such a case 3253 // truncation is legal. 3254 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3255 IdxTy->getPrimitiveSizeInBits()) 3256 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3257 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3258 3259 // Get the total trip count from the count by adding 1. 3260 const SCEV *ExitCount = SE->getAddExpr( 3261 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3262 3263 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3264 3265 // Expand the trip count and place the new instructions in the preheader. 3266 // Notice that the pre-header does not change, only the loop body. 3267 SCEVExpander Exp(*SE, DL, "induction"); 3268 3269 // Count holds the overall loop count (N). 3270 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3271 L->getLoopPreheader()->getTerminator()); 3272 3273 if (TripCount->getType()->isPointerTy()) 3274 TripCount = 3275 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3276 L->getLoopPreheader()->getTerminator()); 3277 3278 return TripCount; 3279 } 3280 3281 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3282 if (VectorTripCount) 3283 return VectorTripCount; 3284 3285 Value *TC = getOrCreateTripCount(L); 3286 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3287 3288 Type *Ty = TC->getType(); 3289 // This is where we can make the step a runtime constant. 3290 Value *Step = createStepForVF(Builder, Ty, VF, UF); 3291 3292 // If the tail is to be folded by masking, round the number of iterations N 3293 // up to a multiple of Step instead of rounding down. This is done by first 3294 // adding Step-1 and then rounding down. 
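// For example, with N = 13 and Step = VF * UF = 8, we compute 13 + 7 = 20 and
// then 20 - (20 % 8) = 16, i.e. N rounded up to the next multiple of 8.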
Note that it's ok if this addition
3295 // overflows: the vector induction variable will eventually wrap to zero given
3296 // that it starts at zero and its Step is a power of two; the loop will then
3297 // exit, with the last early-exit vector comparison also producing all-true.
3298 if (Cost->foldTailByMasking()) {
3299 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3300 "VF*UF must be a power of 2 when folding tail by masking");
3301 assert(!VF.isScalable() &&
3302 "Tail folding not yet supported for scalable vectors");
3303 TC = Builder.CreateAdd(
3304 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3305 }
3306
3307 // Now we need to generate the expression for the part of the loop that the
3308 // vectorized body will execute. This is equal to N - (N % Step) if scalar
3309 // iterations are not required for correctness, or N - Step, otherwise. Step
3310 // is equal to the vectorization factor (number of SIMD elements) times the
3311 // unroll factor (number of SIMD instructions).
3312 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3313
3314 // There are cases where we *must* run at least one iteration in the remainder
3315 // loop. See the cost model for when this can happen. If the step evenly
3316 // divides the trip count, we set the remainder to be equal to the step. If
3317 // the step does not evenly divide the trip count, no adjustment is necessary
3318 // since there will already be scalar iterations. Note that the minimum
3319 // iterations check ensures that N >= Step.
3320 if (Cost->requiresScalarEpilogue(VF)) {
3321 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3322 R = Builder.CreateSelect(IsZero, Step, R);
3323 }
3324
3325 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3326
3327 return VectorTripCount;
3328 }
3329
3330 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3331 const DataLayout &DL) {
3332 // Verify that V is a vector type with the same number of elements as DstVTy.
3333 auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3334 unsigned VF = DstFVTy->getNumElements();
3335 auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3336 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3337 Type *SrcElemTy = SrcVecTy->getElementType();
3338 Type *DstElemTy = DstFVTy->getElementType();
3339 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3340 "Vector elements must have same size");
3341
3342 // Do a direct cast if element types are castable.
3343 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3344 return Builder.CreateBitOrPointerCast(V, DstFVTy);
3345 }
3346 // V cannot be directly cast to the desired vector type.
3347 // This may happen when V is a floating point vector but DstVTy is a vector of
3348 // pointers, or vice-versa. Handle it with a two-step cast through an
3349 // intermediate integer type, i.e. Ptr <-> Int <-> Float.
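// For example, assuming 64-bit pointers, casting <2 x i8*> to <2 x double>
// goes <2 x i8*> -> <2 x i64> (ptrtoint) and then <2 x i64> -> <2 x double>
// (bitcast).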
3350 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3351 "Only one type should be a pointer type"); 3352 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3353 "Only one type should be a floating point type"); 3354 Type *IntTy = 3355 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3356 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3357 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3358 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3359 } 3360 3361 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3362 BasicBlock *Bypass) { 3363 Value *Count = getOrCreateTripCount(L); 3364 // Reuse existing vector loop preheader for TC checks. 3365 // Note that new preheader block is generated for vector loop. 3366 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3367 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3368 3369 // Generate code to check if the loop's trip count is less than VF * UF, or 3370 // equal to it in case a scalar epilogue is required; this implies that the 3371 // vector trip count is zero. This check also covers the case where adding one 3372 // to the backedge-taken count overflowed leading to an incorrect trip count 3373 // of zero. In this case we will also jump to the scalar loop. 3374 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3375 : ICmpInst::ICMP_ULT; 3376 3377 // If tail is to be folded, vector loop takes care of all iterations. 3378 Value *CheckMinIters = Builder.getFalse(); 3379 if (!Cost->foldTailByMasking()) { 3380 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3381 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3382 } 3383 // Create new preheader for vector loop. 3384 LoopVectorPreHeader = 3385 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3386 "vector.ph"); 3387 3388 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3389 DT->getNode(Bypass)->getIDom()) && 3390 "TC check is expected to dominate Bypass"); 3391 3392 // Update dominator for Bypass & LoopExit (if needed). 3393 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3394 if (!Cost->requiresScalarEpilogue(VF)) 3395 // If there is an epilogue which must run, there's no edge from the 3396 // middle block to exit blocks and thus no need to update the immediate 3397 // dominator of the exit blocks. 3398 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3399 3400 ReplaceInstWithInst( 3401 TCCheckBlock->getTerminator(), 3402 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3403 LoopBypassBlocks.push_back(TCCheckBlock); 3404 } 3405 3406 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3407 3408 BasicBlock *const SCEVCheckBlock = 3409 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3410 if (!SCEVCheckBlock) 3411 return nullptr; 3412 3413 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3414 (OptForSizeBasedOnProfile && 3415 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3416 "Cannot SCEV check stride or overflow when optimizing for size"); 3417 3418 3419 // Update dominator only if this is first RT check. 
3420 if (LoopBypassBlocks.empty()) { 3421 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3422 if (!Cost->requiresScalarEpilogue(VF)) 3423 // If there is an epilogue which must run, there's no edge from the 3424 // middle block to exit blocks and thus no need to update the immediate 3425 // dominator of the exit blocks. 3426 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3427 } 3428 3429 LoopBypassBlocks.push_back(SCEVCheckBlock); 3430 AddedSafetyChecks = true; 3431 return SCEVCheckBlock; 3432 } 3433 3434 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3435 BasicBlock *Bypass) { 3436 // VPlan-native path does not do any analysis for runtime checks currently. 3437 if (EnableVPlanNativePath) 3438 return nullptr; 3439 3440 BasicBlock *const MemCheckBlock = 3441 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3442 3443 // Check if we generated code that checks in runtime if arrays overlap. We put 3444 // the checks into a separate block to make the more common case of few 3445 // elements faster. 3446 if (!MemCheckBlock) 3447 return nullptr; 3448 3449 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3450 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3451 "Cannot emit memory checks when optimizing for size, unless forced " 3452 "to vectorize."); 3453 ORE->emit([&]() { 3454 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3455 L->getStartLoc(), L->getHeader()) 3456 << "Code-size may be reduced by not forcing " 3457 "vectorization, or by source-code modifications " 3458 "eliminating the need for runtime checks " 3459 "(e.g., adding 'restrict')."; 3460 }); 3461 } 3462 3463 LoopBypassBlocks.push_back(MemCheckBlock); 3464 3465 AddedSafetyChecks = true; 3466 3467 // We currently don't use LoopVersioning for the actual loop cloning but we 3468 // still use it to add the noalias metadata. 3469 LVer = std::make_unique<LoopVersioning>( 3470 *Legal->getLAI(), 3471 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3472 DT, PSE.getSE()); 3473 LVer->prepareNoAliasMetadata(); 3474 return MemCheckBlock; 3475 } 3476 3477 Value *InnerLoopVectorizer::emitTransformedIndex( 3478 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3479 const InductionDescriptor &ID) const { 3480 3481 SCEVExpander Exp(*SE, DL, "induction"); 3482 auto Step = ID.getStep(); 3483 auto StartValue = ID.getStartValue(); 3484 assert(Index->getType()->getScalarType() == Step->getType() && 3485 "Index scalar type does not match StepValue type"); 3486 3487 // Note: the IR at this point is broken. We cannot use SE to create any new 3488 // SCEV and then expand it, hoping that SCEV's simplification will give us 3489 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3490 // lead to various SCEV crashes. So all we can do is to use builder and rely 3491 // on InstCombine for future simplifications. Here we handle some trivial 3492 // cases only. 3493 auto CreateAdd = [&B](Value *X, Value *Y) { 3494 assert(X->getType() == Y->getType() && "Types don't match!"); 3495 if (auto *CX = dyn_cast<ConstantInt>(X)) 3496 if (CX->isZero()) 3497 return Y; 3498 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3499 if (CY->isZero()) 3500 return X; 3501 return B.CreateAdd(X, Y); 3502 }; 3503 3504 // We allow X to be a vector type, in which case Y will potentially be 3505 // splatted into a vector with the same element count. 
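// For example, multiplying a <4 x i64> index vector by a scalar i64 step first
// splats the step to <4 x i64> and then performs an element-wise multiply.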
3506 auto CreateMul = [&B](Value *X, Value *Y) { 3507 assert(X->getType()->getScalarType() == Y->getType() && 3508 "Types don't match!"); 3509 if (auto *CX = dyn_cast<ConstantInt>(X)) 3510 if (CX->isOne()) 3511 return Y; 3512 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3513 if (CY->isOne()) 3514 return X; 3515 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3516 if (XVTy && !isa<VectorType>(Y->getType())) 3517 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3518 return B.CreateMul(X, Y); 3519 }; 3520 3521 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3522 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3523 // the DomTree is not kept up-to-date for additional blocks generated in the 3524 // vector loop. By using the header as insertion point, we guarantee that the 3525 // expanded instructions dominate all their uses. 3526 auto GetInsertPoint = [this, &B]() { 3527 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3528 if (InsertBB != LoopVectorBody && 3529 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3530 return LoopVectorBody->getTerminator(); 3531 return &*B.GetInsertPoint(); 3532 }; 3533 3534 switch (ID.getKind()) { 3535 case InductionDescriptor::IK_IntInduction: { 3536 assert(!isa<VectorType>(Index->getType()) && 3537 "Vector indices not supported for integer inductions yet"); 3538 assert(Index->getType() == StartValue->getType() && 3539 "Index type does not match StartValue type"); 3540 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3541 return B.CreateSub(StartValue, Index); 3542 auto *Offset = CreateMul( 3543 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3544 return CreateAdd(StartValue, Offset); 3545 } 3546 case InductionDescriptor::IK_PtrInduction: { 3547 assert(isa<SCEVConstant>(Step) && 3548 "Expected constant step for pointer induction"); 3549 return B.CreateGEP( 3550 ID.getElementType(), StartValue, 3551 CreateMul(Index, 3552 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3553 GetInsertPoint()))); 3554 } 3555 case InductionDescriptor::IK_FpInduction: { 3556 assert(!isa<VectorType>(Index->getType()) && 3557 "Vector indices not supported for FP inductions yet"); 3558 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3559 auto InductionBinOp = ID.getInductionBinOp(); 3560 assert(InductionBinOp && 3561 (InductionBinOp->getOpcode() == Instruction::FAdd || 3562 InductionBinOp->getOpcode() == Instruction::FSub) && 3563 "Original bin op should be defined for FP induction"); 3564 3565 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3566 Value *MulExp = B.CreateFMul(StepValue, Index); 3567 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3568 "induction"); 3569 } 3570 case InductionDescriptor::IK_NoInduction: 3571 return nullptr; 3572 } 3573 llvm_unreachable("invalid enum"); 3574 } 3575 3576 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3577 LoopScalarBody = OrigLoop->getHeader(); 3578 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3579 assert(LoopVectorPreHeader && "Invalid loop structure"); 3580 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3581 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3582 "multiple exit loop without required epilogue?"); 3583 3584 LoopMiddleBlock = 3585 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3586 LI, nullptr, Twine(Prefix) + "middle.block"); 3587 
LoopScalarPreHeader =
3588 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3589 nullptr, Twine(Prefix) + "scalar.ph");
3590
3591 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3592
3593 // Set up the middle block terminator. Two cases:
3594 // 1) If we know that we must execute the scalar epilogue, emit an
3595 // unconditional branch.
3596 // 2) Otherwise, we must have a single unique exit block (due to how we
3597 // implement the multiple exit case). In this case, set up a conditional
3598 // branch from the middle block to the loop scalar preheader, and the
3599 // exit block. completeLoopSkeleton will update the condition to use an
3600 // iteration check, if required, to decide whether to execute the remainder.
3601 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3602 BranchInst::Create(LoopScalarPreHeader) :
3603 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3604 Builder.getTrue());
3605 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3606 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3607
3608 // We intentionally don't let SplitBlock update LoopInfo since
3609 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
3610 // LoopVectorBody is explicitly added to the correct place a few lines later.
3611 LoopVectorBody =
3612 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3613 nullptr, nullptr, Twine(Prefix) + "vector.body");
3614
3615 // Update dominator for loop exit.
3616 if (!Cost->requiresScalarEpilogue(VF))
3617 // If there is an epilogue which must run, there's no edge from the
3618 // middle block to exit blocks and thus no need to update the immediate
3619 // dominator of the exit blocks.
3620 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3621
3622 // Create and register the new vector loop.
3623 Loop *Lp = LI->AllocateLoop();
3624 Loop *ParentLoop = OrigLoop->getParentLoop();
3625
3626 // Insert the new loop into the loop nest and register the new basic blocks
3627 // before calling any utilities such as SCEV that require valid LoopInfo.
3628 if (ParentLoop) {
3629 ParentLoop->addChildLoop(Lp);
3630 } else {
3631 LI->addTopLevelLoop(Lp);
3632 }
3633 Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3634 return Lp;
3635 }
3636
3637 void InnerLoopVectorizer::createInductionResumeValues(
3638 Loop *L, Value *VectorTripCount,
3639 std::pair<BasicBlock *, Value *> AdditionalBypass) {
3640 assert(VectorTripCount && L && "Expected valid arguments");
3641 assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3642 (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3643 "Inconsistent information about additional bypass.");
3644 // We are going to resume the execution of the scalar loop.
3645 // Go over all of the induction variables that we found and fix the
3646 // PHIs that are left in the scalar version of the loop.
3647 // The starting values of the PHI nodes depend on the counter of the last
3648 // iteration in the vectorized loop.
3649 // If we come from a bypass edge then we need to start from the original
3650 // start value.
3651 for (auto &InductionEntry : Legal->getInductionVars()) {
3652 PHINode *OrigPhi = InductionEntry.first;
3653 InductionDescriptor II = InductionEntry.second;
3654
3655 // Create phi nodes to merge from the backedge-taken check block.
3656 PHINode *BCResumeVal = 3657 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3658 LoopScalarPreHeader->getTerminator()); 3659 // Copy original phi DL over to the new one. 3660 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3661 Value *&EndValue = IVEndValues[OrigPhi]; 3662 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3663 if (OrigPhi == OldInduction) { 3664 // We know what the end value is. 3665 EndValue = VectorTripCount; 3666 } else { 3667 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3668 3669 // Fast-math-flags propagate from the original induction instruction. 3670 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3671 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3672 3673 Type *StepType = II.getStep()->getType(); 3674 Instruction::CastOps CastOp = 3675 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3676 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3677 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3678 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3679 EndValue->setName("ind.end"); 3680 3681 // Compute the end value for the additional bypass (if applicable). 3682 if (AdditionalBypass.first) { 3683 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3684 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3685 StepType, true); 3686 CRD = 3687 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3688 EndValueFromAdditionalBypass = 3689 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3690 EndValueFromAdditionalBypass->setName("ind.end"); 3691 } 3692 } 3693 // The new PHI merges the original incoming value, in case of a bypass, 3694 // or the value at the end of the vectorized loop. 3695 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3696 3697 // Fix the scalar body counter (PHI node). 3698 // The old induction's phi node in the scalar body needs the truncated 3699 // value. 3700 for (BasicBlock *BB : LoopBypassBlocks) 3701 BCResumeVal->addIncoming(II.getStartValue(), BB); 3702 3703 if (AdditionalBypass.first) 3704 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3705 EndValueFromAdditionalBypass); 3706 3707 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3708 } 3709 } 3710 3711 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3712 MDNode *OrigLoopID) { 3713 assert(L && "Expected valid loop."); 3714 3715 // The trip counts should be cached by now. 3716 Value *Count = getOrCreateTripCount(L); 3717 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3718 3719 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3720 3721 // Add a check in the middle block to see if we have completed 3722 // all of the iterations in the first vector loop. Three cases: 3723 // 1) If we require a scalar epilogue, there is no conditional branch as 3724 // we unconditionally branch to the scalar preheader. Do nothing. 3725 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3726 // Thus if tail is to be folded, we know we don't need to run the 3727 // remainder and we can use the previous value for the condition (true). 3728 // 3) Otherwise, construct a runtime check. 
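// For example, with VF = 4 and UF = 2 the vector trip count is N - (N % 8):
// for N = 16 the compare 16 == 16 holds and the remainder is skipped, while
// for N = 19 the compare 19 == 16 fails and we branch to the scalar loop.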
3729 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3730 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3731 Count, VectorTripCount, "cmp.n", 3732 LoopMiddleBlock->getTerminator()); 3733 3734 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3735 // of the corresponding compare because they may have ended up with 3736 // different line numbers and we want to avoid awkward line stepping while 3737 // debugging. Eg. if the compare has got a line number inside the loop. 3738 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3739 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3740 } 3741 3742 // Get ready to start creating new instructions into the vectorized body. 3743 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3744 "Inconsistent vector loop preheader"); 3745 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3746 3747 Optional<MDNode *> VectorizedLoopID = 3748 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3749 LLVMLoopVectorizeFollowupVectorized}); 3750 if (VectorizedLoopID.hasValue()) { 3751 L->setLoopID(VectorizedLoopID.getValue()); 3752 3753 // Do not setAlreadyVectorized if loop attributes have been defined 3754 // explicitly. 3755 return LoopVectorPreHeader; 3756 } 3757 3758 // Keep all loop hints from the original loop on the vector loop (we'll 3759 // replace the vectorizer-specific hints below). 3760 if (MDNode *LID = OrigLoop->getLoopID()) 3761 L->setLoopID(LID); 3762 3763 LoopVectorizeHints Hints(L, true, *ORE); 3764 Hints.setAlreadyVectorized(); 3765 3766 #ifdef EXPENSIVE_CHECKS 3767 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3768 LI->verify(*DT); 3769 #endif 3770 3771 return LoopVectorPreHeader; 3772 } 3773 3774 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3775 /* 3776 In this function we generate a new loop. The new loop will contain 3777 the vectorized instructions while the old loop will continue to run the 3778 scalar remainder. 3779 3780 [ ] <-- loop iteration number check. 3781 / | 3782 / v 3783 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3784 | / | 3785 | / v 3786 || [ ] <-- vector pre header. 3787 |/ | 3788 | v 3789 | [ ] \ 3790 | [ ]_| <-- vector loop. 3791 | | 3792 | v 3793 \ -[ ] <--- middle-block. 3794 \/ | 3795 /\ v 3796 | ->[ ] <--- new preheader. 3797 | | 3798 (opt) v <-- edge from middle to exit iff epilogue is not required. 3799 | [ ] \ 3800 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3801 \ | 3802 \ v 3803 >[ ] <-- exit block(s). 3804 ... 3805 */ 3806 3807 // Get the metadata of the original loop before it gets modified. 3808 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3809 3810 // Workaround! Compute the trip count of the original loop and cache it 3811 // before we start modifying the CFG. This code has a systemic problem 3812 // wherein it tries to run analysis over partially constructed IR; this is 3813 // wrong, and not simply for SCEV. The trip count of the original loop 3814 // simply happens to be prone to hitting this in practice. In theory, we 3815 // can hit the same issue for any SCEV, or ValueTracking query done during 3816 // mutation. See PR49900. 3817 getOrCreateTripCount(OrigLoop); 3818 3819 // Create an empty vector loop, and prepare basic blocks for the runtime 3820 // checks. 3821 Loop *Lp = createVectorLoopSkeleton(""); 3822 3823 // Now, compare the new count to zero. 
If it is zero skip the vector loop and 3824 // jump to the scalar loop. This check also covers the case where the 3825 // backedge-taken count is uint##_max: adding one to it will overflow leading 3826 // to an incorrect trip count of zero. In this (rare) case we will also jump 3827 // to the scalar loop. 3828 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3829 3830 // Generate the code to check any assumptions that we've made for SCEV 3831 // expressions. 3832 emitSCEVChecks(Lp, LoopScalarPreHeader); 3833 3834 // Generate the code that checks in runtime if arrays overlap. We put the 3835 // checks into a separate block to make the more common case of few elements 3836 // faster. 3837 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3838 3839 // Some loops have a single integer induction variable, while other loops 3840 // don't. One example is c++ iterators that often have multiple pointer 3841 // induction variables. In the code below we also support a case where we 3842 // don't have a single induction variable. 3843 // 3844 // We try to obtain an induction variable from the original loop as hard 3845 // as possible. However if we don't find one that: 3846 // - is an integer 3847 // - counts from zero, stepping by one 3848 // - is the size of the widest induction variable type 3849 // then we create a new one. 3850 OldInduction = Legal->getPrimaryInduction(); 3851 Type *IdxTy = Legal->getWidestInductionType(); 3852 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3853 // The loop step is equal to the vectorization factor (num of SIMD elements) 3854 // times the unroll factor (num of SIMD instructions). 3855 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3856 Value *Step = createStepForVF(Builder, IdxTy, VF, UF); 3857 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3858 Induction = 3859 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3860 getDebugLocFromInstOrOperands(OldInduction)); 3861 3862 // Emit phis for the new starting index of the scalar loop. 3863 createInductionResumeValues(Lp, CountRoundDown); 3864 3865 return completeLoopSkeleton(Lp, OrigLoopID); 3866 } 3867 3868 // Fix up external users of the induction variable. At this point, we are 3869 // in LCSSA form, with all external PHIs that use the IV having one input value, 3870 // coming from the remainder loop. We need those PHIs to also have a correct 3871 // value for the IV when arriving directly from the middle block. 3872 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3873 const InductionDescriptor &II, 3874 Value *CountRoundDown, Value *EndValue, 3875 BasicBlock *MiddleBlock) { 3876 // There are two kinds of external IV usages - those that use the value 3877 // computed in the last iteration (the PHI) and those that use the penultimate 3878 // value (the value that feeds into the phi from the loop latch). 3879 // We allow both, but they, obviously, have different values. 3880 3881 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3882 3883 DenseMap<Value *, Value *> MissingVals; 3884 3885 // An external user of the last iteration's value should see the value that 3886 // the remainder loop uses to initialize its own IV. 
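// For example, for a canonical induction counting from 0 by 1, an LCSSA phi of
// the post-increment value outside the loop should see the vector trip count
// when control arrives from the middle block.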
3887 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3888 for (User *U : PostInc->users()) {
3889 Instruction *UI = cast<Instruction>(U);
3890 if (!OrigLoop->contains(UI)) {
3891 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3892 MissingVals[UI] = EndValue;
3893 }
3894 }
3895
3896 // An external user of the penultimate value needs to see EndValue - Step.
3897 // The simplest way to get this is to recompute it from the constituent SCEVs,
3898 // that is Start + (Step * (CRD - 1)).
3899 for (User *U : OrigPhi->users()) {
3900 auto *UI = cast<Instruction>(U);
3901 if (!OrigLoop->contains(UI)) {
3902 const DataLayout &DL =
3903 OrigLoop->getHeader()->getModule()->getDataLayout();
3904 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3905
3906 IRBuilder<> B(MiddleBlock->getTerminator());
3907
3908 // Fast-math-flags propagate from the original induction instruction.
3909 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3910 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3911
3912 Value *CountMinusOne = B.CreateSub(
3913 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3914 Value *CMO =
3915 !II.getStep()->getType()->isIntegerTy()
3916 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3917 II.getStep()->getType())
3918 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3919 CMO->setName("cast.cmo");
3920 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3921 Escape->setName("ind.escape");
3922 MissingVals[UI] = Escape;
3923 }
3924 }
3925
3926 for (auto &I : MissingVals) {
3927 PHINode *PHI = cast<PHINode>(I.first);
3928 // One corner case we have to handle is two IVs "chasing" each other,
3929 // that is %IV2 = phi [...], [ %IV1, %latch ]
3930 // In this case, if IV1 has an external use, we need to avoid adding both
3931 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3932 // don't already have an incoming value for the middle block.
3933 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3934 PHI->addIncoming(I.second, MiddleBlock);
3935 }
3936 }
3937
3938 namespace {
3939
3940 struct CSEDenseMapInfo {
3941 static bool canHandle(const Instruction *I) {
3942 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3943 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3944 }
3945
3946 static inline Instruction *getEmptyKey() {
3947 return DenseMapInfo<Instruction *>::getEmptyKey();
3948 }
3949
3950 static inline Instruction *getTombstoneKey() {
3951 return DenseMapInfo<Instruction *>::getTombstoneKey();
3952 }
3953
3954 static unsigned getHashValue(const Instruction *I) {
3955 assert(canHandle(I) && "Unknown instruction!");
3956 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3957 I->value_op_end()));
3958 }
3959
3960 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3961 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3962 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3963 return LHS == RHS;
3964 return LHS->isIdenticalTo(RHS);
3965 }
3966 };
3967
3968 } // end anonymous namespace
3969
3970 /// Perform CSE of induction variable instructions.
3971 static void cse(BasicBlock *BB) {
3972 // Perform simple CSE.
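// For example, identical extractelement or getelementptr instructions created
// in the vector body while expanding inductions collapse to a single instance.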
3973 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3974 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3975 if (!CSEDenseMapInfo::canHandle(&In)) 3976 continue; 3977 3978 // Check if we can replace this instruction with any of the 3979 // visited instructions. 3980 if (Instruction *V = CSEMap.lookup(&In)) { 3981 In.replaceAllUsesWith(V); 3982 In.eraseFromParent(); 3983 continue; 3984 } 3985 3986 CSEMap[&In] = &In; 3987 } 3988 } 3989 3990 InstructionCost 3991 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3992 bool &NeedToScalarize) const { 3993 Function *F = CI->getCalledFunction(); 3994 Type *ScalarRetTy = CI->getType(); 3995 SmallVector<Type *, 4> Tys, ScalarTys; 3996 for (auto &ArgOp : CI->args()) 3997 ScalarTys.push_back(ArgOp->getType()); 3998 3999 // Estimate cost of scalarized vector call. The source operands are assumed 4000 // to be vectors, so we need to extract individual elements from there, 4001 // execute VF scalar calls, and then gather the result into the vector return 4002 // value. 4003 InstructionCost ScalarCallCost = 4004 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 4005 if (VF.isScalar()) 4006 return ScalarCallCost; 4007 4008 // Compute corresponding vector type for return value and arguments. 4009 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 4010 for (Type *ScalarTy : ScalarTys) 4011 Tys.push_back(ToVectorTy(ScalarTy, VF)); 4012 4013 // Compute costs of unpacking argument values for the scalar calls and 4014 // packing the return values to a vector. 4015 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 4016 4017 InstructionCost Cost = 4018 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 4019 4020 // If we can't emit a vector call for this function, then the currently found 4021 // cost is the cost we need to return. 4022 NeedToScalarize = true; 4023 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4024 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4025 4026 if (!TLI || CI->isNoBuiltin() || !VecFunc) 4027 return Cost; 4028 4029 // If the corresponding vector cost is cheaper, return its cost. 
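// For example (illustrative numbers), with VF = 4, a scalar call cost of 10
// and a scalarization overhead of 12, the scalarized estimate is
// 4 * 10 + 12 = 52; a vector variant costing 20 is cheaper, so NeedToScalarize
// is cleared and 20 is returned.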
4030 InstructionCost VectorCallCost = 4031 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 4032 if (VectorCallCost < Cost) { 4033 NeedToScalarize = false; 4034 Cost = VectorCallCost; 4035 } 4036 return Cost; 4037 } 4038 4039 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 4040 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 4041 return Elt; 4042 return VectorType::get(Elt, VF); 4043 } 4044 4045 InstructionCost 4046 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 4047 ElementCount VF) const { 4048 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4049 assert(ID && "Expected intrinsic call!"); 4050 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 4051 FastMathFlags FMF; 4052 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 4053 FMF = FPMO->getFastMathFlags(); 4054 4055 SmallVector<const Value *> Arguments(CI->args()); 4056 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 4057 SmallVector<Type *> ParamTys; 4058 std::transform(FTy->param_begin(), FTy->param_end(), 4059 std::back_inserter(ParamTys), 4060 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 4061 4062 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 4063 dyn_cast<IntrinsicInst>(CI)); 4064 return TTI.getIntrinsicInstrCost(CostAttrs, 4065 TargetTransformInfo::TCK_RecipThroughput); 4066 } 4067 4068 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 4069 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 4070 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 4071 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 4072 } 4073 4074 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 4075 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 4076 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 4077 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 4078 } 4079 4080 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 4081 // For every instruction `I` in MinBWs, truncate the operands, create a 4082 // truncated version of `I` and reextend its result. InstCombine runs 4083 // later and will remove any ext/trunc pairs. 4084 SmallPtrSet<Value *, 4> Erased; 4085 for (const auto &KV : Cost->getMinimalBitwidths()) { 4086 // If the value wasn't vectorized, we must maintain the original scalar 4087 // type. The absence of the value from State indicates that it 4088 // wasn't vectorized. 4089 // FIXME: Should not rely on getVPValue at this point. 4090 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4091 if (!State.hasAnyVectorValue(Def)) 4092 continue; 4093 for (unsigned Part = 0; Part < UF; ++Part) { 4094 Value *I = State.get(Def, Part); 4095 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 4096 continue; 4097 Type *OriginalTy = I->getType(); 4098 Type *ScalarTruncatedTy = 4099 IntegerType::get(OriginalTy->getContext(), KV.second); 4100 auto *TruncatedTy = VectorType::get( 4101 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 4102 if (TruncatedTy == OriginalTy) 4103 continue; 4104 4105 IRBuilder<> B(cast<Instruction>(I)); 4106 auto ShrinkOperand = [&](Value *V) -> Value * { 4107 if (auto *ZI = dyn_cast<ZExtInst>(V)) 4108 if (ZI->getSrcTy() == TruncatedTy) 4109 return ZI->getOperand(0); 4110 return B.CreateZExtOrTrunc(V, TruncatedTy); 4111 }; 4112 4113 // The actual instruction modification depends on the instruction type, 4114 // unfortunately. 
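// For example, assuming MinBWs records that a 32-bit add only needs 8 bits:
// an 'add <4 x i32>' is rebuilt as an 'add <4 x i8>' on operands narrowed via
// ShrinkOperand(), and the result is extended back to <4 x i32> at the end of
// this loop; InstCombine later removes the redundant ext/trunc pairs.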
4115 Value *NewI = nullptr; 4116 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 4117 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 4118 ShrinkOperand(BO->getOperand(1))); 4119 4120 // Any wrapping introduced by shrinking this operation shouldn't be 4121 // considered undefined behavior. So, we can't unconditionally copy 4122 // arithmetic wrapping flags to NewI. 4123 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 4124 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 4125 NewI = 4126 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 4127 ShrinkOperand(CI->getOperand(1))); 4128 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 4129 NewI = B.CreateSelect(SI->getCondition(), 4130 ShrinkOperand(SI->getTrueValue()), 4131 ShrinkOperand(SI->getFalseValue())); 4132 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4133 switch (CI->getOpcode()) { 4134 default: 4135 llvm_unreachable("Unhandled cast!"); 4136 case Instruction::Trunc: 4137 NewI = ShrinkOperand(CI->getOperand(0)); 4138 break; 4139 case Instruction::SExt: 4140 NewI = B.CreateSExtOrTrunc( 4141 CI->getOperand(0), 4142 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4143 break; 4144 case Instruction::ZExt: 4145 NewI = B.CreateZExtOrTrunc( 4146 CI->getOperand(0), 4147 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4148 break; 4149 } 4150 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4151 auto Elements0 = 4152 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4153 auto *O0 = B.CreateZExtOrTrunc( 4154 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4155 auto Elements1 = 4156 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4157 auto *O1 = B.CreateZExtOrTrunc( 4158 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4159 4160 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4161 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4162 // Don't do anything with the operands, just extend the result. 4163 continue; 4164 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4165 auto Elements = 4166 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4167 auto *O0 = B.CreateZExtOrTrunc( 4168 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4169 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4170 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4171 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4172 auto Elements = 4173 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4174 auto *O0 = B.CreateZExtOrTrunc( 4175 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4176 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4177 } else { 4178 // If we don't know what to do, be conservative and don't do anything. 4179 continue; 4180 } 4181 4182 // Lastly, extend the result. 4183 NewI->takeName(cast<Instruction>(I)); 4184 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4185 I->replaceAllUsesWith(Res); 4186 cast<Instruction>(I)->eraseFromParent(); 4187 Erased.insert(I); 4188 State.reset(Def, Res, Part); 4189 } 4190 } 4191 4192 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4193 for (const auto &KV : Cost->getMinimalBitwidths()) { 4194 // If the value wasn't vectorized, we must maintain the original scalar 4195 // type. The absence of the value from State indicates that it 4196 // wasn't vectorized. 4197 // FIXME: Should not rely on getVPValue at this point. 
4198 VPValue *Def = State.Plan->getVPValue(KV.first, true);
4199 if (!State.hasAnyVectorValue(Def))
4200 continue;
4201 for (unsigned Part = 0; Part < UF; ++Part) {
4202 Value *I = State.get(Def, Part);
4203 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4204 if (Inst && Inst->use_empty()) {
4205 Value *NewI = Inst->getOperand(0);
4206 Inst->eraseFromParent();
4207 State.reset(Def, NewI, Part);
4208 }
4209 }
4210 }
4211 }
4212
4213 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4214 // Insert truncates and extends for any truncated instructions as hints to
4215 // InstCombine.
4216 if (VF.isVector())
4217 truncateToMinimalBitwidths(State);
4218
4219 // Fix widened non-induction PHIs by setting up the PHI operands.
4220 if (OrigPHIsToFix.size()) {
4221 assert(EnableVPlanNativePath &&
4222 "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4223 fixNonInductionPHIs(State);
4224 }
4225
4226 // At this point every instruction in the original loop is widened to a
4227 // vector form. Now we need to fix the recurrences in the loop. These PHI
4228 // nodes are currently empty because we did not want to introduce cycles.
4229 // This is the second stage of vectorizing recurrences.
4230 fixCrossIterationPHIs(State);
4231
4232 // Forget the original basic block.
4233 PSE.getSE()->forgetLoop(OrigLoop);
4234
4235 // If we inserted an edge from the middle block to the unique exit block,
4236 // update uses outside the loop (phis) to account for the newly inserted
4237 // edge.
4238 if (!Cost->requiresScalarEpilogue(VF)) {
4239 // Fix-up external users of the induction variables.
4240 for (auto &Entry : Legal->getInductionVars())
4241 fixupIVUsers(Entry.first, Entry.second,
4242 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4243 IVEndValues[Entry.first], LoopMiddleBlock);
4244
4245 fixLCSSAPHIs(State);
4246 }
4247
4248 for (Instruction *PI : PredicatedInstructions)
4249 sinkScalarOperands(&*PI);
4250
4251 // Remove redundant induction instructions.
4252 cse(LoopVectorBody);
4253
4254 // Set/update profile weights for the vector and remainder loops as original
4255 // loop iterations are now distributed among them. Note that the original
4256 // loop, represented by LoopScalarBody, becomes the remainder loop after vectorization.
4257 //
4258 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
4259 // end up getting a slightly less accurate result, but that should be OK since
4260 // the profile is not inherently precise anyway. Note also that a possible
4261 // bypass of the vector code caused by legality checks is ignored, optimistically
4262 // assigning all the weight to the vector loop.
4263 //
4264 // For scalable vectorization we can't know at compile time how many iterations
4265 // of the loop are handled in one vector iteration, so instead assume a pessimistic
4266 // vscale of '1'.
4267 setProfileInfoAfterUnrolling(
4268 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4269 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4270 }
4271
4272 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4273 // In order to support recurrences we need to be able to vectorize Phi nodes.
4274 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4275 // stage #2: We now need to fix the recurrences by adding incoming edges to
4276 // the currently empty PHI nodes.
At this point every instruction in the 4277 // original loop is widened to a vector form so we can use them to construct 4278 // the incoming edges. 4279 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 4280 for (VPRecipeBase &R : Header->phis()) { 4281 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 4282 fixReduction(ReductionPhi, State); 4283 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 4284 fixFirstOrderRecurrence(FOR, State); 4285 } 4286 } 4287 4288 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, 4289 VPTransformState &State) { 4290 // This is the second phase of vectorizing first-order recurrences. An 4291 // overview of the transformation is described below. Suppose we have the 4292 // following loop. 4293 // 4294 // for (int i = 0; i < n; ++i) 4295 // b[i] = a[i] - a[i - 1]; 4296 // 4297 // There is a first-order recurrence on "a". For this loop, the shorthand 4298 // scalar IR looks like: 4299 // 4300 // scalar.ph: 4301 // s_init = a[-1] 4302 // br scalar.body 4303 // 4304 // scalar.body: 4305 // i = phi [0, scalar.ph], [i+1, scalar.body] 4306 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4307 // s2 = a[i] 4308 // b[i] = s2 - s1 4309 // br cond, scalar.body, ... 4310 // 4311 // In this example, s1 is a recurrence because it's value depends on the 4312 // previous iteration. In the first phase of vectorization, we created a 4313 // vector phi v1 for s1. We now complete the vectorization and produce the 4314 // shorthand vector IR shown below (for VF = 4, UF = 1). 4315 // 4316 // vector.ph: 4317 // v_init = vector(..., ..., ..., a[-1]) 4318 // br vector.body 4319 // 4320 // vector.body 4321 // i = phi [0, vector.ph], [i+4, vector.body] 4322 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4323 // v2 = a[i, i+1, i+2, i+3]; 4324 // v3 = vector(v1(3), v2(0, 1, 2)) 4325 // b[i, i+1, i+2, i+3] = v2 - v3 4326 // br cond, vector.body, middle.block 4327 // 4328 // middle.block: 4329 // x = v2(3) 4330 // br scalar.ph 4331 // 4332 // scalar.ph: 4333 // s_init = phi [x, middle.block], [a[-1], otherwise] 4334 // br scalar.body 4335 // 4336 // After execution completes the vector loop, we extract the next value of 4337 // the recurrence (x) to use as the initial value in the scalar loop. 4338 4339 // Extract the last vector element in the middle block. This will be the 4340 // initial value for the recurrence when jumping to the scalar loop. 4341 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4342 Value *Incoming = State.get(PreviousDef, UF - 1); 4343 auto *ExtractForScalar = Incoming; 4344 auto *IdxTy = Builder.getInt32Ty(); 4345 if (VF.isVector()) { 4346 auto *One = ConstantInt::get(IdxTy, 1); 4347 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4348 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4349 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4350 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4351 "vector.recur.extract"); 4352 } 4353 // Extract the second last element in the middle block if the 4354 // Phi is used outside the loop. We need to extract the phi itself 4355 // and not the last element (the phi update in the current iteration). This 4356 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4357 // when the scalar loop is not run at all. 
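// For example, continuing the shorthand IR above (VF = 4, UF = 1): lane 3 of
// v2 (x = v2(3)) seeds the scalar loop's recurrence, whereas an external user
// of s1 must see v2(2), the value the phi held in the final iteration, when
// the scalar loop does not run at all.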
4358 Value *ExtractForPhiUsedOutsideLoop = nullptr;
4359 if (VF.isVector()) {
4360 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4361 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4362 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4363 Incoming, Idx, "vector.recur.extract.for.phi");
4364 } else if (UF > 1)
4365 // When the loop is unrolled without vectorizing, initialize
4366 // ExtractForPhiUsedOutsideLoop with the unrolled part just prior to the
4367 // last part of `Incoming`. This is analogous to the vectorized case above:
4368 // extracting the second-last element when VF > 1.
4369 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4370
4371 // Fix the initial value of the original recurrence in the scalar loop.
4372 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4373 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4374 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4375 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4376 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4377 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4378 Start->addIncoming(Incoming, BB);
4379 }
4380
4381 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4382 Phi->setName("scalar.recur");
4383
4384 // Finally, fix users of the recurrence outside the loop. The users will need
4385 // either the last value of the scalar recurrence or the last value of the
4386 // vector recurrence we extracted in the middle block. Since the loop is in
4387 // LCSSA form, we just need to find all the phi nodes for the original scalar
4388 // recurrence in the exit block, and then add an edge for the middle block.
4389 // Note that LCSSA does not imply single entry when the original scalar loop
4390 // had multiple exiting edges (as we always run the last iteration in the
4391 // scalar epilogue); in that case, there is no edge from the middle block to
4392 // the exit block and thus no phis which need to be updated.
4393 if (!Cost->requiresScalarEpilogue(VF))
4394 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4395 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4396 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4397 }
4398
4399 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4400 VPTransformState &State) {
4401 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4402 // Get its reduction variable descriptor.
4403 assert(Legal->isReductionVariable(OrigPhi) &&
4404 "Unable to find the reduction variable");
4405 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4406
4407 RecurKind RK = RdxDesc.getRecurrenceKind();
4408 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4409 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4410 setDebugLocFromInst(ReductionStartValue);
4411
4412 VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4413 // This is the vector-clone of the value that leaves the loop.
4414 Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4415
4416 // Wrap flags are in general invalid after vectorization, clear them.
4417 clearReductionWrapFlags(RdxDesc, State);
4418
4419 // Before each round, move the insertion point right between
4420 // the PHIs and the values we are going to write.
4421 // This allows us to write both PHINodes and the extractelement
4422 // instructions.
4423 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4424 4425 setDebugLocFromInst(LoopExitInst); 4426 4427 Type *PhiTy = OrigPhi->getType(); 4428 // If tail is folded by masking, the vector value to leave the loop should be 4429 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4430 // instead of the former. For an inloop reduction the reduction will already 4431 // be predicated, and does not need to be handled here. 4432 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 4433 for (unsigned Part = 0; Part < UF; ++Part) { 4434 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4435 Value *Sel = nullptr; 4436 for (User *U : VecLoopExitInst->users()) { 4437 if (isa<SelectInst>(U)) { 4438 assert(!Sel && "Reduction exit feeding two selects"); 4439 Sel = U; 4440 } else 4441 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4442 } 4443 assert(Sel && "Reduction exit feeds no select"); 4444 State.reset(LoopExitInstDef, Sel, Part); 4445 4446 // If the target can create a predicated operator for the reduction at no 4447 // extra cost in the loop (for example a predicated vadd), it can be 4448 // cheaper for the select to remain in the loop than be sunk out of it, 4449 // and so use the select value for the phi instead of the old 4450 // LoopExitValue. 4451 if (PreferPredicatedReductionSelect || 4452 TTI->preferPredicatedReductionSelect( 4453 RdxDesc.getOpcode(), PhiTy, 4454 TargetTransformInfo::ReductionFlags())) { 4455 auto *VecRdxPhi = 4456 cast<PHINode>(State.get(PhiR, Part)); 4457 VecRdxPhi->setIncomingValueForBlock( 4458 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4459 } 4460 } 4461 } 4462 4463 // If the vector reduction can be performed in a smaller type, we truncate 4464 // then extend the loop exit value to enable InstCombine to evaluate the 4465 // entire expression in the smaller type. 4466 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4467 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4468 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4469 Builder.SetInsertPoint( 4470 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4471 VectorParts RdxParts(UF); 4472 for (unsigned Part = 0; Part < UF; ++Part) { 4473 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4474 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4475 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4476 : Builder.CreateZExt(Trunc, VecTy); 4477 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4478 if (U != Trunc) { 4479 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4480 RdxParts[Part] = Extnd; 4481 } 4482 } 4483 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4484 for (unsigned Part = 0; Part < UF; ++Part) { 4485 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4486 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4487 } 4488 } 4489 4490 // Reduce all of the unrolled parts into a single vector. 4491 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4492 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4493 4494 // The middle block terminator has already been assigned a DebugLoc here (the 4495 // OrigLoop's single latch terminator). 
We want the whole middle block to 4496 // appear to execute on this line because: (a) it is all compiler generated, 4497 // (b) these instructions are always executed after evaluating the latch 4498 // conditional branch, and (c) other passes may add new predecessors which 4499 // terminate on this line. This is the easiest way to ensure we don't 4500 // accidentally cause an extra step back into the loop while debugging. 4501 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4502 if (PhiR->isOrdered()) 4503 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4504 else { 4505 // Floating-point operations should have some FMF to enable the reduction. 4506 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4507 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4508 for (unsigned Part = 1; Part < UF; ++Part) { 4509 Value *RdxPart = State.get(LoopExitInstDef, Part); 4510 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4511 ReducedPartRdx = Builder.CreateBinOp( 4512 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4513 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 4514 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 4515 ReducedPartRdx, RdxPart); 4516 else 4517 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4518 } 4519 } 4520 4521 // Create the reduction after the loop. Note that inloop reductions create the 4522 // target reduction in the loop using a Reduction recipe. 4523 if (VF.isVector() && !PhiR->isInLoop()) { 4524 ReducedPartRdx = 4525 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 4526 // If the reduction can be performed in a smaller type, we need to extend 4527 // the reduction to the wider type before we branch to the original loop. 4528 if (PhiTy != RdxDesc.getRecurrenceType()) 4529 ReducedPartRdx = RdxDesc.isSigned() 4530 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4531 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4532 } 4533 4534 // Create a phi node that merges control-flow from the backedge-taken check 4535 // block and the middle block. 4536 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4537 LoopScalarPreHeader->getTerminator()); 4538 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4539 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4540 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4541 4542 // Now, we need to fix the users of the reduction variable 4543 // inside and outside of the scalar remainder loop. 4544 4545 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4546 // in the exit blocks. See comment on analogous loop in 4547 // fixFirstOrderRecurrence for a more complete explaination of the logic. 4548 if (!Cost->requiresScalarEpilogue(VF)) 4549 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4550 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4551 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4552 4553 // Fix the scalar loop reduction variable with the incoming reduction sum 4554 // from the vector body and from the backedge value. 4555 int IncomingEdgeBlockIdx = 4556 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4557 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4558 // Pick the other block. 4559 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 
0 : 1); 4560 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4561 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4562 } 4563 4564 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4565 VPTransformState &State) { 4566 RecurKind RK = RdxDesc.getRecurrenceKind(); 4567 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4568 return; 4569 4570 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4571 assert(LoopExitInstr && "null loop exit instruction"); 4572 SmallVector<Instruction *, 8> Worklist; 4573 SmallPtrSet<Instruction *, 8> Visited; 4574 Worklist.push_back(LoopExitInstr); 4575 Visited.insert(LoopExitInstr); 4576 4577 while (!Worklist.empty()) { 4578 Instruction *Cur = Worklist.pop_back_val(); 4579 if (isa<OverflowingBinaryOperator>(Cur)) 4580 for (unsigned Part = 0; Part < UF; ++Part) { 4581 // FIXME: Should not rely on getVPValue at this point. 4582 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4583 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4584 } 4585 4586 for (User *U : Cur->users()) { 4587 Instruction *UI = cast<Instruction>(U); 4588 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4589 Visited.insert(UI).second) 4590 Worklist.push_back(UI); 4591 } 4592 } 4593 } 4594 4595 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4596 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4597 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4598 // Some phis were already hand updated by the reduction and recurrence 4599 // code above, leave them alone. 4600 continue; 4601 4602 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4603 // Non-instruction incoming values will have only one value. 4604 4605 VPLane Lane = VPLane::getFirstLane(); 4606 if (isa<Instruction>(IncomingValue) && 4607 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4608 VF)) 4609 Lane = VPLane::getLastLaneForVF(VF); 4610 4611 // Can be a loop invariant incoming value or the last scalar value to be 4612 // extracted from the vectorized loop. 4613 // FIXME: Should not rely on getVPValue at this point. 4614 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4615 Value *lastIncomingValue = 4616 OrigLoop->isLoopInvariant(IncomingValue) 4617 ? IncomingValue 4618 : State.get(State.Plan->getVPValue(IncomingValue, true), 4619 VPIteration(UF - 1, Lane)); 4620 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4621 } 4622 } 4623 4624 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4625 // The basic block and loop containing the predicated instruction. 4626 auto *PredBB = PredInst->getParent(); 4627 auto *VectorLoop = LI->getLoopFor(PredBB); 4628 4629 // Initialize a worklist with the operands of the predicated instruction. 4630 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4631 4632 // Holds instructions that we need to analyze again. An instruction may be 4633 // reanalyzed if we don't yet know if we can sink it or not. 4634 SmallVector<Instruction *, 8> InstsToReanalyze; 4635 4636 // Returns true if a given use occurs in the predicated block. Phi nodes use 4637 // their operands in their corresponding predecessor blocks. 
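// For example (illustrative IR), a use of %v in 'phi [ %v, %pred.block ], ...'
// that syntactically appears in a successor block is treated as occurring in
// %pred.block, because that is where the phi conceptually reads the operand.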
4638 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4639 auto *I = cast<Instruction>(U.getUser()); 4640 BasicBlock *BB = I->getParent(); 4641 if (auto *Phi = dyn_cast<PHINode>(I)) 4642 BB = Phi->getIncomingBlock( 4643 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4644 return BB == PredBB; 4645 }; 4646 4647 // Iteratively sink the scalarized operands of the predicated instruction 4648 // into the block we created for it. When an instruction is sunk, it's 4649 // operands are then added to the worklist. The algorithm ends after one pass 4650 // through the worklist doesn't sink a single instruction. 4651 bool Changed; 4652 do { 4653 // Add the instructions that need to be reanalyzed to the worklist, and 4654 // reset the changed indicator. 4655 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4656 InstsToReanalyze.clear(); 4657 Changed = false; 4658 4659 while (!Worklist.empty()) { 4660 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4661 4662 // We can't sink an instruction if it is a phi node, is not in the loop, 4663 // or may have side effects. 4664 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4665 I->mayHaveSideEffects()) 4666 continue; 4667 4668 // If the instruction is already in PredBB, check if we can sink its 4669 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4670 // sinking the scalar instruction I, hence it appears in PredBB; but it 4671 // may have failed to sink I's operands (recursively), which we try 4672 // (again) here. 4673 if (I->getParent() == PredBB) { 4674 Worklist.insert(I->op_begin(), I->op_end()); 4675 continue; 4676 } 4677 4678 // It's legal to sink the instruction if all its uses occur in the 4679 // predicated block. Otherwise, there's nothing to do yet, and we may 4680 // need to reanalyze the instruction. 4681 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4682 InstsToReanalyze.push_back(I); 4683 continue; 4684 } 4685 4686 // Move the instruction to the beginning of the predicated block, and add 4687 // it's operands to the worklist. 4688 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4689 Worklist.insert(I->op_begin(), I->op_end()); 4690 4691 // The sinking may have enabled other instructions to be sunk, so we will 4692 // need to iterate. 4693 Changed = true; 4694 } 4695 } while (Changed); 4696 } 4697 4698 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4699 for (PHINode *OrigPhi : OrigPHIsToFix) { 4700 VPWidenPHIRecipe *VPPhi = 4701 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4702 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4703 // Make sure the builder has a valid insert point. 4704 Builder.SetInsertPoint(NewPhi); 4705 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4706 VPValue *Inc = VPPhi->getIncomingValue(i); 4707 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4708 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4709 } 4710 } 4711 } 4712 4713 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4714 return Cost->useOrderedReductions(RdxDesc); 4715 } 4716 4717 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, 4718 VPWidenGEPRecipe *WidenGEPRec, 4719 VPUser &Operands, unsigned UF, 4720 ElementCount VF, bool IsPtrLoopInvariant, 4721 SmallBitVector &IsIndexLoopInvariant, 4722 VPTransformState &State) { 4723 // Construct a vector GEP by widening the operands of the scalar GEP as 4724 // necessary. We mark the vector GEP 'inbounds' if appropriate. 
A GEP 4725 // results in a vector of pointers when at least one operand of the GEP 4726 // is vector-typed. Thus, to keep the representation compact, we only use 4727 // vector-typed operands for loop-varying values. 4728 4729 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4730 // If we are vectorizing, but the GEP has only loop-invariant operands, 4731 // the GEP we build (by only using vector-typed operands for 4732 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4733 // produce a vector of pointers, we need to either arbitrarily pick an 4734 // operand to broadcast, or broadcast a clone of the original GEP. 4735 // Here, we broadcast a clone of the original. 4736 // 4737 // TODO: If at some point we decide to scalarize instructions having 4738 // loop-invariant operands, this special case will no longer be 4739 // required. We would add the scalarization decision to 4740 // collectLoopScalars() and teach getVectorValue() to broadcast 4741 // the lane-zero scalar value. 4742 auto *Clone = Builder.Insert(GEP->clone()); 4743 for (unsigned Part = 0; Part < UF; ++Part) { 4744 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4745 State.set(WidenGEPRec, EntryPart, Part); 4746 addMetadata(EntryPart, GEP); 4747 } 4748 } else { 4749 // If the GEP has at least one loop-varying operand, we are sure to 4750 // produce a vector of pointers. But if we are only unrolling, we want 4751 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4752 // produce with the code below will be scalar (if VF == 1) or vector 4753 // (otherwise). Note that for the unroll-only case, we still maintain 4754 // values in the vector mapping with initVector, as we do for other 4755 // instructions. 4756 for (unsigned Part = 0; Part < UF; ++Part) { 4757 // The pointer operand of the new GEP. If it's loop-invariant, we 4758 // won't broadcast it. 4759 auto *Ptr = IsPtrLoopInvariant 4760 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4761 : State.get(Operands.getOperand(0), Part); 4762 4763 // Collect all the indices for the new GEP. If any index is 4764 // loop-invariant, we won't broadcast it. 4765 SmallVector<Value *, 4> Indices; 4766 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4767 VPValue *Operand = Operands.getOperand(I); 4768 if (IsIndexLoopInvariant[I - 1]) 4769 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4770 else 4771 Indices.push_back(State.get(Operand, Part)); 4772 } 4773 4774 // If the GEP instruction is vectorized and was in a basic block that 4775 // needed predication, we can't propagate the poison-generating 'inbounds' 4776 // flag. The control flow has been linearized and the GEP is no longer 4777 // guarded by the predicate, which could make the 'inbounds' properties to 4778 // no longer hold. 4779 bool IsInBounds = GEP->isInBounds() && 4780 State.MayGeneratePoisonRecipes.count(WidenGEPRec) == 0; 4781 4782 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4783 // but it should be a vector, otherwise. 4784 auto *NewGEP = 4785 IsInBounds 4786 ? 
Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4787 Indices) 4788 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4789 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4790 "NewGEP is not a pointer vector"); 4791 State.set(WidenGEPRec, NewGEP, Part); 4792 addMetadata(NewGEP, GEP); 4793 } 4794 } 4795 } 4796 4797 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4798 VPWidenPHIRecipe *PhiR, 4799 VPTransformState &State) { 4800 PHINode *P = cast<PHINode>(PN); 4801 if (EnableVPlanNativePath) { 4802 // Currently we enter here in the VPlan-native path for non-induction 4803 // PHIs where all control flow is uniform. We simply widen these PHIs. 4804 // Create a vector phi with no operands - the vector phi operands will be 4805 // set at the end of vector code generation. 4806 Type *VecTy = (State.VF.isScalar()) 4807 ? PN->getType() 4808 : VectorType::get(PN->getType(), State.VF); 4809 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4810 State.set(PhiR, VecPhi, 0); 4811 OrigPHIsToFix.push_back(P); 4812 4813 return; 4814 } 4815 4816 assert(PN->getParent() == OrigLoop->getHeader() && 4817 "Non-header phis should have been handled elsewhere"); 4818 4819 // In order to support recurrences we need to be able to vectorize Phi nodes. 4820 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4821 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4822 // this value when we vectorize all of the instructions that use the PHI. 4823 4824 assert(!Legal->isReductionVariable(P) && 4825 "reductions should be handled elsewhere"); 4826 4827 setDebugLocFromInst(P); 4828 4829 // This PHINode must be an induction variable. 4830 // Make sure that we know about it. 4831 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4832 4833 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4834 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4835 4836 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4837 // which can be found from the original scalar operations. 4838 switch (II.getKind()) { 4839 case InductionDescriptor::IK_NoInduction: 4840 llvm_unreachable("Unknown induction"); 4841 case InductionDescriptor::IK_IntInduction: 4842 case InductionDescriptor::IK_FpInduction: 4843 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4844 case InductionDescriptor::IK_PtrInduction: { 4845 // Handle the pointer induction variable case. 4846 assert(P->getType()->isPointerTy() && "Unexpected type."); 4847 4848 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4849 // This is the normalized GEP that starts counting at zero. 4850 Value *PtrInd = 4851 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4852 // Determine the number of scalars we need to generate for each unroll 4853 // iteration. If the instruction is uniform, we only need to generate the 4854 // first lane. Otherwise, we generate all VF values. 4855 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4856 unsigned Lanes = IsUniform ? 
1 : State.VF.getKnownMinValue(); 4857 4858 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4859 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4860 if (NeedsVectorIndex) { 4861 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4862 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4863 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4864 } 4865 4866 for (unsigned Part = 0; Part < UF; ++Part) { 4867 Value *PartStart = 4868 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4869 4870 if (NeedsVectorIndex) { 4871 // Here we cache the whole vector, which means we can support the 4872 // extraction of any lane. However, in some cases the extractelement 4873 // instruction that is generated for scalar uses of this vector (e.g. 4874 // a load instruction) is not folded away. Therefore we still 4875 // calculate values for the first n lanes to avoid redundant moves 4876 // (when extracting the 0th element) and to produce scalar code (i.e. 4877 // additional add/gep instructions instead of expensive extractelement 4878 // instructions) when extracting higher-order elements. 4879 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4880 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4881 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4882 Value *SclrGep = 4883 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4884 SclrGep->setName("next.gep"); 4885 State.set(PhiR, SclrGep, Part); 4886 } 4887 4888 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4889 Value *Idx = Builder.CreateAdd( 4890 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4891 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4892 Value *SclrGep = 4893 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4894 SclrGep->setName("next.gep"); 4895 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4896 } 4897 } 4898 return; 4899 } 4900 assert(isa<SCEVConstant>(II.getStep()) && 4901 "Induction step not a SCEV constant!"); 4902 Type *PhiType = II.getStep()->getType(); 4903 4904 // Build a pointer phi 4905 Value *ScalarStartValue = II.getStartValue(); 4906 Type *ScStValueType = ScalarStartValue->getType(); 4907 PHINode *NewPointerPhi = 4908 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4909 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4910 4911 // A pointer induction, performed by using a gep 4912 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4913 Instruction *InductionLoc = LoopLatch->getTerminator(); 4914 const SCEV *ScalarStep = II.getStep(); 4915 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4916 Value *ScalarStepValue = 4917 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4918 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4919 Value *NumUnrolledElems = 4920 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4921 Value *InductionGEP = GetElementPtrInst::Create( 4922 II.getElementType(), NewPointerPhi, 4923 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4924 InductionLoc); 4925 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4926 4927 // Create UF many actual address geps that use the pointer 4928 // phi as base and a vectorized version of the step value 4929 // (<step*0, ..., step*N>) as offset. 
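// For example (fixed-width illustration), with VF = 4, UF = 2 and scalar step
// S, part 0 uses offsets <0, 1, 2, 3> * S and part 1 uses offsets
// <4, 5, 6, 7> * S, both applied to the same pointer phi by a vector GEP.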
4930 for (unsigned Part = 0; Part < State.UF; ++Part) { 4931 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4932 Value *StartOffsetScalar = 4933 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4934 Value *StartOffset = 4935 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4936 // Create a vector of consecutive numbers from zero to VF. 4937 StartOffset = 4938 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4939 4940 Value *GEP = Builder.CreateGEP( 4941 II.getElementType(), NewPointerPhi, 4942 Builder.CreateMul( 4943 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4944 "vector.gep")); 4945 State.set(PhiR, GEP, Part); 4946 } 4947 } 4948 } 4949 } 4950 4951 /// A helper function for checking whether an integer division-related 4952 /// instruction may divide by zero (in which case it must be predicated if 4953 /// executed conditionally in the scalar code). 4954 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4955 /// Non-zero divisors that are non compile-time constants will not be 4956 /// converted into multiplication, so we will still end up scalarizing 4957 /// the division, but can do so w/o predication. 4958 static bool mayDivideByZero(Instruction &I) { 4959 assert((I.getOpcode() == Instruction::UDiv || 4960 I.getOpcode() == Instruction::SDiv || 4961 I.getOpcode() == Instruction::URem || 4962 I.getOpcode() == Instruction::SRem) && 4963 "Unexpected instruction"); 4964 Value *Divisor = I.getOperand(1); 4965 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4966 return !CInt || CInt->isZero(); 4967 } 4968 4969 void InnerLoopVectorizer::widenInstruction(Instruction &I, 4970 VPWidenRecipe *WidenRec, 4971 VPTransformState &State) { 4972 switch (I.getOpcode()) { 4973 case Instruction::Call: 4974 case Instruction::Br: 4975 case Instruction::PHI: 4976 case Instruction::GetElementPtr: 4977 case Instruction::Select: 4978 llvm_unreachable("This instruction is handled by a different recipe."); 4979 case Instruction::UDiv: 4980 case Instruction::SDiv: 4981 case Instruction::SRem: 4982 case Instruction::URem: 4983 case Instruction::Add: 4984 case Instruction::FAdd: 4985 case Instruction::Sub: 4986 case Instruction::FSub: 4987 case Instruction::FNeg: 4988 case Instruction::Mul: 4989 case Instruction::FMul: 4990 case Instruction::FDiv: 4991 case Instruction::FRem: 4992 case Instruction::Shl: 4993 case Instruction::LShr: 4994 case Instruction::AShr: 4995 case Instruction::And: 4996 case Instruction::Or: 4997 case Instruction::Xor: { 4998 // Just widen unops and binops. 4999 setDebugLocFromInst(&I); 5000 5001 for (unsigned Part = 0; Part < UF; ++Part) { 5002 SmallVector<Value *, 2> Ops; 5003 for (VPValue *VPOp : WidenRec->operands()) 5004 Ops.push_back(State.get(VPOp, Part)); 5005 5006 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 5007 5008 if (auto *VecOp = dyn_cast<Instruction>(V)) { 5009 VecOp->copyIRFlags(&I); 5010 5011 // If the instruction is vectorized and was in a basic block that needed 5012 // predication, we can't propagate poison-generating flags (nuw/nsw, 5013 // exact, etc.). The control flow has been linearized and the 5014 // instruction is no longer guarded by the predicate, which could make 5015 // the flag properties to no longer hold. 5016 if (State.MayGeneratePoisonRecipes.count(WidenRec) > 0) 5017 VecOp->dropPoisonGeneratingFlags(); 5018 } 5019 5020 // Use this vector value for all users of the original instruction. 
5021 State.set(WidenRec, V, Part); 5022 addMetadata(V, &I); 5023 } 5024 5025 break; 5026 } 5027 case Instruction::ICmp: 5028 case Instruction::FCmp: { 5029 // Widen compares. Generate vector compares. 5030 bool FCmp = (I.getOpcode() == Instruction::FCmp); 5031 auto *Cmp = cast<CmpInst>(&I); 5032 setDebugLocFromInst(Cmp); 5033 for (unsigned Part = 0; Part < UF; ++Part) { 5034 Value *A = State.get(WidenRec->getOperand(0), Part); 5035 Value *B = State.get(WidenRec->getOperand(1), Part); 5036 Value *C = nullptr; 5037 if (FCmp) { 5038 // Propagate fast math flags. 5039 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 5040 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 5041 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 5042 } else { 5043 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 5044 } 5045 State.set(WidenRec, C, Part); 5046 addMetadata(C, &I); 5047 } 5048 5049 break; 5050 } 5051 5052 case Instruction::ZExt: 5053 case Instruction::SExt: 5054 case Instruction::FPToUI: 5055 case Instruction::FPToSI: 5056 case Instruction::FPExt: 5057 case Instruction::PtrToInt: 5058 case Instruction::IntToPtr: 5059 case Instruction::SIToFP: 5060 case Instruction::UIToFP: 5061 case Instruction::Trunc: 5062 case Instruction::FPTrunc: 5063 case Instruction::BitCast: { 5064 auto *CI = cast<CastInst>(&I); 5065 setDebugLocFromInst(CI); 5066 5067 /// Vectorize casts. 5068 Type *DestTy = 5069 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 5070 5071 for (unsigned Part = 0; Part < UF; ++Part) { 5072 Value *A = State.get(WidenRec->getOperand(0), Part); 5073 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 5074 State.set(WidenRec, Cast, Part); 5075 addMetadata(Cast, &I); 5076 } 5077 break; 5078 } 5079 default: 5080 // This instruction is not vectorized by simple widening. 5081 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 5082 llvm_unreachable("Unhandled instruction!"); 5083 } // end of switch. 5084 } 5085 5086 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 5087 VPUser &ArgOperands, 5088 VPTransformState &State) { 5089 assert(!isa<DbgInfoIntrinsic>(I) && 5090 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 5091 setDebugLocFromInst(&I); 5092 5093 Module *M = I.getParent()->getParent()->getParent(); 5094 auto *CI = cast<CallInst>(&I); 5095 5096 SmallVector<Type *, 4> Tys; 5097 for (Value *ArgOperand : CI->args()) 5098 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 5099 5100 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5101 5102 // The flag shows whether we use Intrinsic or a usual Call for vectorized 5103 // version of the instruction. 5104 // Is it beneficial to perform intrinsic call compared to lib call? 5105 bool NeedToScalarize = false; 5106 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 5107 InstructionCost IntrinsicCost = ID ? 
Cost->getVectorIntrinsicCost(CI, VF) : 0; 5108 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 5109 assert((UseVectorIntrinsic || !NeedToScalarize) && 5110 "Instruction should be scalarized elsewhere."); 5111 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 5112 "Either the intrinsic cost or vector call cost must be valid"); 5113 5114 for (unsigned Part = 0; Part < UF; ++Part) { 5115 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 5116 SmallVector<Value *, 4> Args; 5117 for (auto &I : enumerate(ArgOperands.operands())) { 5118 // Some intrinsics have a scalar argument - don't replace it with a 5119 // vector. 5120 Value *Arg; 5121 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5122 Arg = State.get(I.value(), Part); 5123 else { 5124 Arg = State.get(I.value(), VPIteration(0, 0)); 5125 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 5126 TysForDecl.push_back(Arg->getType()); 5127 } 5128 Args.push_back(Arg); 5129 } 5130 5131 Function *VectorF; 5132 if (UseVectorIntrinsic) { 5133 // Use vector version of the intrinsic. 5134 if (VF.isVector()) 5135 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5136 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5137 assert(VectorF && "Can't retrieve vector intrinsic."); 5138 } else { 5139 // Use vector version of the function call. 5140 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5141 #ifndef NDEBUG 5142 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5143 "Can't create vector function."); 5144 #endif 5145 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5146 } 5147 SmallVector<OperandBundleDef, 1> OpBundles; 5148 CI->getOperandBundlesAsDefs(OpBundles); 5149 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5150 5151 if (isa<FPMathOperator>(V)) 5152 V->copyFastMathFlags(CI); 5153 5154 State.set(Def, V, Part); 5155 addMetadata(V, &I); 5156 } 5157 } 5158 5159 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5160 VPUser &Operands, 5161 bool InvariantCond, 5162 VPTransformState &State) { 5163 setDebugLocFromInst(&I); 5164 5165 // The condition can be loop invariant but still defined inside the 5166 // loop. This means that we can't just use the original 'cond' value. 5167 // We have to take the 'vectorized' value and pick the first lane. 5168 // Instcombine will make this a no-op. 5169 auto *InvarCond = InvariantCond 5170 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5171 : nullptr; 5172 5173 for (unsigned Part = 0; Part < UF; ++Part) { 5174 Value *Cond = 5175 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5176 Value *Op0 = State.get(Operands.getOperand(1), Part); 5177 Value *Op1 = State.get(Operands.getOperand(2), Part); 5178 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5179 State.set(VPDef, Sel, Part); 5180 addMetadata(Sel, &I); 5181 } 5182 } 5183 5184 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5185 // We should not collect Scalars more than once per VF. Right now, this 5186 // function is called from collectUniformsAndScalars(), which already does 5187 // this check. Collecting Scalars for VF=1 does not make any sense. 
5188 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5189 "This function should not be visited twice for the same VF"); 5190 5191 SmallSetVector<Instruction *, 8> Worklist; 5192 5193 // These sets are used to seed the analysis with pointers used by memory 5194 // accesses that will remain scalar. 5195 SmallSetVector<Instruction *, 8> ScalarPtrs; 5196 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5197 auto *Latch = TheLoop->getLoopLatch(); 5198 5199 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5200 // The pointer operands of loads and stores will be scalar as long as the 5201 // memory access is not a gather or scatter operation. The value operand of a 5202 // store will remain scalar if the store is scalarized. 5203 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5204 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5205 assert(WideningDecision != CM_Unknown && 5206 "Widening decision should be ready at this moment"); 5207 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5208 if (Ptr == Store->getValueOperand()) 5209 return WideningDecision == CM_Scalarize; 5210 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5211 "Ptr is neither a value or pointer operand"); 5212 return WideningDecision != CM_GatherScatter; 5213 }; 5214 5215 // A helper that returns true if the given value is a bitcast or 5216 // getelementptr instruction contained in the loop. 5217 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5218 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5219 isa<GetElementPtrInst>(V)) && 5220 !TheLoop->isLoopInvariant(V); 5221 }; 5222 5223 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5224 if (!isa<PHINode>(Ptr) || 5225 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5226 return false; 5227 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5228 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5229 return false; 5230 return isScalarUse(MemAccess, Ptr); 5231 }; 5232 5233 // A helper that evaluates a memory access's use of a pointer. If the 5234 // pointer is actually the pointer induction of a loop, it is being 5235 // inserted into Worklist. If the use will be a scalar use, and the 5236 // pointer is only used by memory accesses, we place the pointer in 5237 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5238 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5239 if (isScalarPtrInduction(MemAccess, Ptr)) { 5240 Worklist.insert(cast<Instruction>(Ptr)); 5241 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5242 << "\n"); 5243 5244 Instruction *Update = cast<Instruction>( 5245 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5246 5247 // If there is more than one user of Update (Ptr), we shouldn't assume it 5248 // will be scalar after vectorisation as other users of the instruction 5249 // may require widening. Otherwise, add it to ScalarPtrs. 5250 if (Update->hasOneUse() && cast<Value>(*Update->user_begin()) == Ptr) { 5251 ScalarPtrs.insert(Update); 5252 return; 5253 } 5254 } 5255 // We only care about bitcast and getelementptr instructions contained in 5256 // the loop. 5257 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5258 return; 5259 5260 // If the pointer has already been identified as scalar (e.g., if it was 5261 // also identified as uniform), there's nothing to do. 
5262 auto *I = cast<Instruction>(Ptr);
5263 if (Worklist.count(I))
5264 return;
5265
5266 // If the use of the pointer will be a scalar use, and all users of the
5267 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5268 // place the pointer in PossibleNonScalarPtrs.
5269 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5270 return isa<LoadInst>(U) || isa<StoreInst>(U);
5271 }))
5272 ScalarPtrs.insert(I);
5273 else
5274 PossibleNonScalarPtrs.insert(I);
5275 };
5276
5277 // We seed the scalars analysis with two classes of instructions: (1)
5278 // instructions marked uniform-after-vectorization and (2) bitcast,
5279 // getelementptr and (pointer) phi instructions used by memory accesses
5280 // requiring a scalar use.
5281 //
5282 // (1) Add to the worklist all instructions that have been identified as
5283 // uniform-after-vectorization.
5284 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5285
5286 // (2) Add to the worklist all bitcast and getelementptr instructions used by
5287 // memory accesses requiring a scalar use. The pointer operands of loads and
5288 // stores will be scalar as long as the memory access is not a gather or
5289 // scatter operation. The value operand of a store will remain scalar if the
5290 // store is scalarized.
5291 for (auto *BB : TheLoop->blocks())
5292 for (auto &I : *BB) {
5293 if (auto *Load = dyn_cast<LoadInst>(&I)) {
5294 evaluatePtrUse(Load, Load->getPointerOperand());
5295 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5296 evaluatePtrUse(Store, Store->getPointerOperand());
5297 evaluatePtrUse(Store, Store->getValueOperand());
5298 }
5299 }
5300 for (auto *I : ScalarPtrs)
5301 if (!PossibleNonScalarPtrs.count(I)) {
5302 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5303 Worklist.insert(I);
5304 }
5305
5306 // Insert the forced scalars.
5307 // FIXME: Currently widenPHIInstruction() often creates a dead vector
5308 // induction variable when the PHI user is scalarized.
5309 auto ForcedScalar = ForcedScalars.find(VF);
5310 if (ForcedScalar != ForcedScalars.end())
5311 for (auto *I : ForcedScalar->second)
5312 Worklist.insert(I);
5313
5314 // Expand the worklist by looking through any bitcasts and getelementptr
5315 // instructions we've already identified as scalar. This is similar to the
5316 // expansion step in collectLoopUniforms(); however, here we're only
5317 // expanding to include additional bitcasts and getelementptr instructions.
5318 unsigned Idx = 0;
5319 while (Idx != Worklist.size()) {
5320 Instruction *Dst = Worklist[Idx++];
5321 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5322 continue;
5323 auto *Src = cast<Instruction>(Dst->getOperand(0));
5324 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5325 auto *J = cast<Instruction>(U);
5326 return !TheLoop->contains(J) || Worklist.count(J) ||
5327 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5328 isScalarUse(J, Src));
5329 })) {
5330 Worklist.insert(Src);
5331 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5332 }
5333 }
5334
5335 // An induction variable will remain scalar if all users of the induction
5336 // variable and induction variable update remain scalar.
5337 for (auto &Induction : Legal->getInductionVars()) {
5338 auto *Ind = Induction.first;
5339 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5340
5341 // If tail-folding is applied, the primary induction variable will be used
5342 // to feed a vector compare.
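// For example, with fold-tail the header mask is typically formed by an
// 'icmp ule' of the widened IV against a splat of the backedge-taken count,
// so the primary IV must stay vectorized even if its other users are scalar.
// (Illustrative; the mask itself is generated elsewhere.)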
5343 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5344 continue; 5345 5346 // Determine if all users of the induction variable are scalar after 5347 // vectorization. 5348 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5349 auto *I = cast<Instruction>(U); 5350 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5351 }); 5352 if (!ScalarInd) 5353 continue; 5354 5355 // Determine if all users of the induction variable update instruction are 5356 // scalar after vectorization. 5357 auto ScalarIndUpdate = 5358 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5359 auto *I = cast<Instruction>(U); 5360 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5361 }); 5362 if (!ScalarIndUpdate) 5363 continue; 5364 5365 // The induction variable and its update instruction will remain scalar. 5366 Worklist.insert(Ind); 5367 Worklist.insert(IndUpdate); 5368 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5369 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5370 << "\n"); 5371 } 5372 5373 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5374 } 5375 5376 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 5377 if (!blockNeedsPredicationForAnyReason(I->getParent())) 5378 return false; 5379 switch(I->getOpcode()) { 5380 default: 5381 break; 5382 case Instruction::Load: 5383 case Instruction::Store: { 5384 if (!Legal->isMaskRequired(I)) 5385 return false; 5386 auto *Ptr = getLoadStorePointerOperand(I); 5387 auto *Ty = getLoadStoreType(I); 5388 const Align Alignment = getLoadStoreAlignment(I); 5389 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5390 TTI.isLegalMaskedGather(Ty, Alignment)) 5391 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5392 TTI.isLegalMaskedScatter(Ty, Alignment)); 5393 } 5394 case Instruction::UDiv: 5395 case Instruction::SDiv: 5396 case Instruction::SRem: 5397 case Instruction::URem: 5398 return mayDivideByZero(*I); 5399 } 5400 return false; 5401 } 5402 5403 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5404 Instruction *I, ElementCount VF) { 5405 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5406 assert(getWideningDecision(I, VF) == CM_Unknown && 5407 "Decision should not be set yet."); 5408 auto *Group = getInterleavedAccessGroup(I); 5409 assert(Group && "Must have a group."); 5410 5411 // If the instruction's allocated size doesn't equal it's type size, it 5412 // requires padding and will be scalarized. 5413 auto &DL = I->getModule()->getDataLayout(); 5414 auto *ScalarTy = getLoadStoreType(I); 5415 if (hasIrregularType(ScalarTy, DL)) 5416 return false; 5417 5418 // Check if masking is required. 5419 // A Group may need masking for one of two reasons: it resides in a block that 5420 // needs predication, or it was decided to use masking to deal with gaps 5421 // (either a gap at the end of a load-access that may result in a speculative 5422 // load, or any gaps in a store-access). 
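  // For example (hypothetical group of factor 3): a loop that loads only
  // fields a and c of a struct {a, b, c} forms a group with two members and a
  // gap. Widening the last group of such loads may speculatively read past
  // the end of the underlying object unless the gap is masked (or a scalar
  // epilogue guarantees the final group is executed scalar); a store group
  // with missing members must likewise be masked so the gaps are not
  // overwritten.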
5423 bool PredicatedAccessRequiresMasking = 5424 blockNeedsPredicationForAnyReason(I->getParent()) && 5425 Legal->isMaskRequired(I); 5426 bool LoadAccessWithGapsRequiresEpilogMasking = 5427 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 5428 !isScalarEpilogueAllowed(); 5429 bool StoreAccessWithGapsRequiresMasking = 5430 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 5431 if (!PredicatedAccessRequiresMasking && 5432 !LoadAccessWithGapsRequiresEpilogMasking && 5433 !StoreAccessWithGapsRequiresMasking) 5434 return true; 5435 5436 // If masked interleaving is required, we expect that the user/target had 5437 // enabled it, because otherwise it either wouldn't have been created or 5438 // it should have been invalidated by the CostModel. 5439 assert(useMaskedInterleavedAccesses(TTI) && 5440 "Masked interleave-groups for predicated accesses are not enabled."); 5441 5442 if (Group->isReverse()) 5443 return false; 5444 5445 auto *Ty = getLoadStoreType(I); 5446 const Align Alignment = getLoadStoreAlignment(I); 5447 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5448 : TTI.isLegalMaskedStore(Ty, Alignment); 5449 } 5450 5451 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5452 Instruction *I, ElementCount VF) { 5453 // Get and ensure we have a valid memory instruction. 5454 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 5455 5456 auto *Ptr = getLoadStorePointerOperand(I); 5457 auto *ScalarTy = getLoadStoreType(I); 5458 5459 // In order to be widened, the pointer should be consecutive, first of all. 5460 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 5461 return false; 5462 5463 // If the instruction is a store located in a predicated block, it will be 5464 // scalarized. 5465 if (isScalarWithPredication(I)) 5466 return false; 5467 5468 // If the instruction's allocated size doesn't equal it's type size, it 5469 // requires padding and will be scalarized. 5470 auto &DL = I->getModule()->getDataLayout(); 5471 if (hasIrregularType(ScalarTy, DL)) 5472 return false; 5473 5474 return true; 5475 } 5476 5477 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5478 // We should not collect Uniforms more than once per VF. Right now, 5479 // this function is called from collectUniformsAndScalars(), which 5480 // already does this check. Collecting Uniforms for VF=1 does not make any 5481 // sense. 5482 5483 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5484 "This function should not be visited twice for the same VF"); 5485 5486 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5487 // not analyze again. Uniforms.count(VF) will return 1. 5488 Uniforms[VF].clear(); 5489 5490 // We now know that the loop is vectorizable! 5491 // Collect instructions inside the loop that will remain uniform after 5492 // vectorization. 5493 5494 // Global values, params and instructions outside of current loop are out of 5495 // scope. 5496 auto isOutOfScope = [&](Value *V) -> bool { 5497 Instruction *I = dyn_cast<Instruction>(V); 5498 return (!I || !TheLoop->contains(I)); 5499 }; 5500 5501 // Worklist containing uniform instructions demanding lane 0. 5502 SetVector<Instruction *> Worklist; 5503 BasicBlock *Latch = TheLoop->getLoopLatch(); 5504 5505 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 5506 // that are scalar with predication must not be considered uniform after 5507 // vectorization, because that would create an erroneous replicating region 5508 // where only a single instance out of VF should be formed. 5509 // TODO: optimize such seldom cases if found important, see PR40816. 5510 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5511 if (isOutOfScope(I)) { 5512 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5513 << *I << "\n"); 5514 return; 5515 } 5516 if (isScalarWithPredication(I)) { 5517 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5518 << *I << "\n"); 5519 return; 5520 } 5521 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5522 Worklist.insert(I); 5523 }; 5524 5525 // Start with the conditional branch. If the branch condition is an 5526 // instruction contained in the loop that is only used by the branch, it is 5527 // uniform. 5528 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5529 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5530 addToWorklistIfAllowed(Cmp); 5531 5532 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5533 InstWidening WideningDecision = getWideningDecision(I, VF); 5534 assert(WideningDecision != CM_Unknown && 5535 "Widening decision should be ready at this moment"); 5536 5537 // A uniform memory op is itself uniform. We exclude uniform stores 5538 // here as they demand the last lane, not the first one. 5539 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5540 assert(WideningDecision == CM_Scalarize); 5541 return true; 5542 } 5543 5544 return (WideningDecision == CM_Widen || 5545 WideningDecision == CM_Widen_Reverse || 5546 WideningDecision == CM_Interleave); 5547 }; 5548 5549 5550 // Returns true if Ptr is the pointer operand of a memory access instruction 5551 // I, and I is known to not require scalarization. 5552 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5553 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5554 }; 5555 5556 // Holds a list of values which are known to have at least one uniform use. 5557 // Note that there may be other uses which aren't uniform. A "uniform use" 5558 // here is something which only demands lane 0 of the unrolled iterations; 5559 // it does not imply that all lanes produce the same value (e.g. this is not 5560 // the usual meaning of uniform) 5561 SetVector<Value *> HasUniformUse; 5562 5563 // Scan the loop for instructions which are either a) known to have only 5564 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5565 for (auto *BB : TheLoop->blocks()) 5566 for (auto &I : *BB) { 5567 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5568 switch (II->getIntrinsicID()) { 5569 case Intrinsic::sideeffect: 5570 case Intrinsic::experimental_noalias_scope_decl: 5571 case Intrinsic::assume: 5572 case Intrinsic::lifetime_start: 5573 case Intrinsic::lifetime_end: 5574 if (TheLoop->hasLoopInvariantOperands(&I)) 5575 addToWorklistIfAllowed(&I); 5576 break; 5577 default: 5578 break; 5579 } 5580 } 5581 5582 // ExtractValue instructions must be uniform, because the operands are 5583 // known to be loop-invariant. 
5584 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5585 assert(isOutOfScope(EVI->getAggregateOperand()) && 5586 "Expected aggregate value to be loop invariant"); 5587 addToWorklistIfAllowed(EVI); 5588 continue; 5589 } 5590 5591 // If there's no pointer operand, there's nothing to do. 5592 auto *Ptr = getLoadStorePointerOperand(&I); 5593 if (!Ptr) 5594 continue; 5595 5596 // A uniform memory op is itself uniform. We exclude uniform stores 5597 // here as they demand the last lane, not the first one. 5598 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5599 addToWorklistIfAllowed(&I); 5600 5601 if (isUniformDecision(&I, VF)) { 5602 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5603 HasUniformUse.insert(Ptr); 5604 } 5605 } 5606 5607 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5608 // demanding) users. Since loops are assumed to be in LCSSA form, this 5609 // disallows uses outside the loop as well. 5610 for (auto *V : HasUniformUse) { 5611 if (isOutOfScope(V)) 5612 continue; 5613 auto *I = cast<Instruction>(V); 5614 auto UsersAreMemAccesses = 5615 llvm::all_of(I->users(), [&](User *U) -> bool { 5616 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5617 }); 5618 if (UsersAreMemAccesses) 5619 addToWorklistIfAllowed(I); 5620 } 5621 5622 // Expand Worklist in topological order: whenever a new instruction 5623 // is added , its users should be already inside Worklist. It ensures 5624 // a uniform instruction will only be used by uniform instructions. 5625 unsigned idx = 0; 5626 while (idx != Worklist.size()) { 5627 Instruction *I = Worklist[idx++]; 5628 5629 for (auto OV : I->operand_values()) { 5630 // isOutOfScope operands cannot be uniform instructions. 5631 if (isOutOfScope(OV)) 5632 continue; 5633 // First order recurrence Phi's should typically be considered 5634 // non-uniform. 5635 auto *OP = dyn_cast<PHINode>(OV); 5636 if (OP && Legal->isFirstOrderRecurrence(OP)) 5637 continue; 5638 // If all the users of the operand are uniform, then add the 5639 // operand into the uniform worklist. 5640 auto *OI = cast<Instruction>(OV); 5641 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5642 auto *J = cast<Instruction>(U); 5643 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5644 })) 5645 addToWorklistIfAllowed(OI); 5646 } 5647 } 5648 5649 // For an instruction to be added into Worklist above, all its users inside 5650 // the loop should also be in Worklist. However, this condition cannot be 5651 // true for phi nodes that form a cyclic dependence. We must process phi 5652 // nodes separately. An induction variable will remain uniform if all users 5653 // of the induction variable and induction variable update remain uniform. 5654 // The code below handles both pointer and non-pointer induction variables. 5655 for (auto &Induction : Legal->getInductionVars()) { 5656 auto *Ind = Induction.first; 5657 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5658 5659 // Determine if all users of the induction variable are uniform after 5660 // vectorization. 5661 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5662 auto *I = cast<Instruction>(U); 5663 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5664 isVectorizedMemAccessUse(I, Ind); 5665 }); 5666 if (!UniformInd) 5667 continue; 5668 5669 // Determine if all users of the induction variable update instruction are 5670 // uniform after vectorization. 
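    // Sketch of the cyclic case handled here (illustrative names):
    //   %i      = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
    //   %i.next = add nuw i64 %i, 1
    // %i and %i.next use one another, so neither can be added by the generic
    // worklist expansion above; they are accepted as a pair once all of their
    // remaining in-loop users (e.g. address computations of widened
    // consecutive accesses) are already known to be uniform.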
5671 auto UniformIndUpdate = 5672 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5673 auto *I = cast<Instruction>(U); 5674 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5675 isVectorizedMemAccessUse(I, IndUpdate); 5676 }); 5677 if (!UniformIndUpdate) 5678 continue; 5679 5680 // The induction variable and its update instruction will remain uniform. 5681 addToWorklistIfAllowed(Ind); 5682 addToWorklistIfAllowed(IndUpdate); 5683 } 5684 5685 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5686 } 5687 5688 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5689 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5690 5691 if (Legal->getRuntimePointerChecking()->Need) { 5692 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5693 "runtime pointer checks needed. Enable vectorization of this " 5694 "loop with '#pragma clang loop vectorize(enable)' when " 5695 "compiling with -Os/-Oz", 5696 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5697 return true; 5698 } 5699 5700 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5701 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5702 "runtime SCEV checks needed. Enable vectorization of this " 5703 "loop with '#pragma clang loop vectorize(enable)' when " 5704 "compiling with -Os/-Oz", 5705 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5706 return true; 5707 } 5708 5709 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5710 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5711 reportVectorizationFailure("Runtime stride check for small trip count", 5712 "runtime stride == 1 checks needed. Enable vectorization of " 5713 "this loop without such check by compiling with -Os/-Oz", 5714 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5715 return true; 5716 } 5717 5718 return false; 5719 } 5720 5721 ElementCount 5722 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5723 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5724 return ElementCount::getScalable(0); 5725 5726 if (Hints->isScalableVectorizationDisabled()) { 5727 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5728 "ScalableVectorizationDisabled", ORE, TheLoop); 5729 return ElementCount::getScalable(0); 5730 } 5731 5732 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5733 5734 auto MaxScalableVF = ElementCount::getScalable( 5735 std::numeric_limits<ElementCount::ScalarTy>::max()); 5736 5737 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5738 // FIXME: While for scalable vectors this is currently sufficient, this should 5739 // be replaced by a more detailed mechanism that filters out specific VFs, 5740 // instead of invalidating vectorization for a whole set of VFs based on the 5741 // MaxVF. 5742 5743 // Disable scalable vectorization if the loop contains unsupported reductions. 5744 if (!canVectorizeReductions(MaxScalableVF)) { 5745 reportVectorizationInfo( 5746 "Scalable vectorization not supported for the reduction " 5747 "operations found in this loop.", 5748 "ScalableVFUnfeasible", ORE, TheLoop); 5749 return ElementCount::getScalable(0); 5750 } 5751 5752 // Disable scalable vectorization if the loop contains any instructions 5753 // with element types not supported for scalable vectors. 
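  // For instance (hypothetical target behaviour): a target may be able to
  // legalize <vscale x 4 x i32> but have no scalable container for an i128
  // or x86_fp80 element; one load, store or reduction of such a type in the
  // loop is then enough to restrict the search to fixed-width VFs.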
5754 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5755 return !Ty->isVoidTy() && 5756 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5757 })) { 5758 reportVectorizationInfo("Scalable vectorization is not supported " 5759 "for all element types found in this loop.", 5760 "ScalableVFUnfeasible", ORE, TheLoop); 5761 return ElementCount::getScalable(0); 5762 } 5763 5764 if (Legal->isSafeForAnyVectorWidth()) 5765 return MaxScalableVF; 5766 5767 // Limit MaxScalableVF by the maximum safe dependence distance. 5768 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5769 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5770 unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange) 5771 .getVScaleRangeArgs() 5772 .second; 5773 if (VScaleMax > 0) 5774 MaxVScale = VScaleMax; 5775 } 5776 MaxScalableVF = ElementCount::getScalable( 5777 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5778 if (!MaxScalableVF) 5779 reportVectorizationInfo( 5780 "Max legal vector width too small, scalable vectorization " 5781 "unfeasible.", 5782 "ScalableVFUnfeasible", ORE, TheLoop); 5783 5784 return MaxScalableVF; 5785 } 5786 5787 FixedScalableVFPair 5788 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5789 ElementCount UserVF) { 5790 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5791 unsigned SmallestType, WidestType; 5792 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5793 5794 // Get the maximum safe dependence distance in bits computed by LAA. 5795 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5796 // the memory accesses that is most restrictive (involved in the smallest 5797 // dependence distance). 5798 unsigned MaxSafeElements = 5799 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5800 5801 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5802 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5803 5804 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5805 << ".\n"); 5806 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5807 << ".\n"); 5808 5809 // First analyze the UserVF, fall back if the UserVF should be ignored. 5810 if (UserVF) { 5811 auto MaxSafeUserVF = 5812 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5813 5814 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5815 // If `VF=vscale x N` is safe, then so is `VF=N` 5816 if (UserVF.isScalable()) 5817 return FixedScalableVFPair( 5818 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5819 else 5820 return UserVF; 5821 } 5822 5823 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5824 5825 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5826 // is better to ignore the hint and let the compiler choose a suitable VF. 
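  // Worked example (illustrative numbers): if a memory dependence limits
  // MaxSafeFixedVF to 4 and the user asked for VF=8, the hint is clamped to
  // 4 below; a scalable request such as VF=vscale x 8 is instead dropped
  // with a remark and the compiler chooses the factor itself.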
5827 if (!UserVF.isScalable()) { 5828 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5829 << " is unsafe, clamping to max safe VF=" 5830 << MaxSafeFixedVF << ".\n"); 5831 ORE->emit([&]() { 5832 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5833 TheLoop->getStartLoc(), 5834 TheLoop->getHeader()) 5835 << "User-specified vectorization factor " 5836 << ore::NV("UserVectorizationFactor", UserVF) 5837 << " is unsafe, clamping to maximum safe vectorization factor " 5838 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5839 }); 5840 return MaxSafeFixedVF; 5841 } 5842 5843 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5844 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5845 << " is ignored because scalable vectors are not " 5846 "available.\n"); 5847 ORE->emit([&]() { 5848 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5849 TheLoop->getStartLoc(), 5850 TheLoop->getHeader()) 5851 << "User-specified vectorization factor " 5852 << ore::NV("UserVectorizationFactor", UserVF) 5853 << " is ignored because the target does not support scalable " 5854 "vectors. The compiler will pick a more suitable value."; 5855 }); 5856 } else { 5857 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5858 << " is unsafe. Ignoring scalable UserVF.\n"); 5859 ORE->emit([&]() { 5860 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5861 TheLoop->getStartLoc(), 5862 TheLoop->getHeader()) 5863 << "User-specified vectorization factor " 5864 << ore::NV("UserVectorizationFactor", UserVF) 5865 << " is unsafe. Ignoring the hint to let the compiler pick a " 5866 "more suitable value."; 5867 }); 5868 } 5869 } 5870 5871 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5872 << " / " << WidestType << " bits.\n"); 5873 5874 FixedScalableVFPair Result(ElementCount::getFixed(1), 5875 ElementCount::getScalable(0)); 5876 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5877 WidestType, MaxSafeFixedVF)) 5878 Result.FixedVF = MaxVF; 5879 5880 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5881 WidestType, MaxSafeScalableVF)) 5882 if (MaxVF.isScalable()) { 5883 Result.ScalableVF = MaxVF; 5884 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5885 << "\n"); 5886 } 5887 5888 return Result; 5889 } 5890 5891 FixedScalableVFPair 5892 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5893 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5894 // TODO: It may by useful to do since it's still likely to be dynamically 5895 // uniform if the target can skip. 5896 reportVectorizationFailure( 5897 "Not inserting runtime ptr check for divergent target", 5898 "runtime pointer checks needed. 
Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
    if (runtimeChecksRequired())
      return FixedScalableVFPair::getNone();

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to a vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return FixedScalableVFPair::getNone();
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
  // FIXME: The condition below pessimizes the case for fixed-width vectors,
  // when scalable VFs are also candidates for vectorization.
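  // Worked example (illustrative numbers): with TC = 64, MaxFixedVF = 8 and
  // UserIC = 2, the check below computes 64 urem (8 * 2) == 0, so no tail
  // remains; since every candidate VF is a power of two no larger than
  // MaxFixedVF, smaller factors such as 4 or 2 divide the trip count as
  // well, and tail folding can be skipped.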
5974 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5975 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5976 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5977 "MaxFixedVF must be a power of 2"); 5978 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5979 : MaxFixedVF.getFixedValue(); 5980 ScalarEvolution *SE = PSE.getSE(); 5981 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5982 const SCEV *ExitCount = SE->getAddExpr( 5983 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5984 const SCEV *Rem = SE->getURemExpr( 5985 SE->applyLoopGuards(ExitCount, TheLoop), 5986 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5987 if (Rem->isZero()) { 5988 // Accept MaxFixedVF if we do not have a tail. 5989 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5990 return MaxFactors; 5991 } 5992 } 5993 5994 // For scalable vectors, don't use tail folding as this is currently not yet 5995 // supported. The code is likely to have ended up here if the tripcount is 5996 // low, in which case it makes sense not to use scalable vectors. 5997 if (MaxFactors.ScalableVF.isVector()) 5998 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5999 6000 // If we don't know the precise trip count, or if the trip count that we 6001 // found modulo the vectorization factor is not zero, try to fold the tail 6002 // by masking. 6003 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 6004 if (Legal->prepareToFoldTailByMasking()) { 6005 FoldTailByMasking = true; 6006 return MaxFactors; 6007 } 6008 6009 // If there was a tail-folding hint/switch, but we can't fold the tail by 6010 // masking, fallback to a vectorization with a scalar epilogue. 6011 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 6012 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 6013 "scalar epilogue instead.\n"); 6014 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 6015 return MaxFactors; 6016 } 6017 6018 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 6019 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 6020 return FixedScalableVFPair::getNone(); 6021 } 6022 6023 if (TC == 0) { 6024 reportVectorizationFailure( 6025 "Unable to calculate the loop count due to complex control flow", 6026 "unable to calculate the loop count due to complex control flow", 6027 "UnknownLoopCountComplexCFG", ORE, TheLoop); 6028 return FixedScalableVFPair::getNone(); 6029 } 6030 6031 reportVectorizationFailure( 6032 "Cannot optimize for size and vectorize at the same time.", 6033 "cannot optimize for size and vectorize at the same time. " 6034 "Enable vectorization of this loop with '#pragma clang loop " 6035 "vectorize(enable)' when compiling with -Os/-Oz", 6036 "NoTailLoopWithOptForSize", ORE, TheLoop); 6037 return FixedScalableVFPair::getNone(); 6038 } 6039 6040 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 6041 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 6042 const ElementCount &MaxSafeVF) { 6043 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 6044 TypeSize WidestRegister = TTI.getRegisterBitWidth( 6045 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 6046 : TargetTransformInfo::RGK_FixedWidthVector); 6047 6048 // Convenience function to return the minimum of two ElementCounts. 
6049 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 6050 assert((LHS.isScalable() == RHS.isScalable()) && 6051 "Scalable flags must match"); 6052 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 6053 }; 6054 6055 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 6056 // Note that both WidestRegister and WidestType may not be a powers of 2. 6057 auto MaxVectorElementCount = ElementCount::get( 6058 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 6059 ComputeScalableMaxVF); 6060 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 6061 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 6062 << (MaxVectorElementCount * WidestType) << " bits.\n"); 6063 6064 if (!MaxVectorElementCount) { 6065 LLVM_DEBUG(dbgs() << "LV: The target has no " 6066 << (ComputeScalableMaxVF ? "scalable" : "fixed") 6067 << " vector registers.\n"); 6068 return ElementCount::getFixed(1); 6069 } 6070 6071 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 6072 if (ConstTripCount && 6073 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 6074 isPowerOf2_32(ConstTripCount)) { 6075 // We need to clamp the VF to be the ConstTripCount. There is no point in 6076 // choosing a higher viable VF as done in the loop below. If 6077 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 6078 // the TC is less than or equal to the known number of lanes. 6079 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 6080 << ConstTripCount << "\n"); 6081 return TripCountEC; 6082 } 6083 6084 ElementCount MaxVF = MaxVectorElementCount; 6085 if (TTI.shouldMaximizeVectorBandwidth() || 6086 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 6087 auto MaxVectorElementCountMaxBW = ElementCount::get( 6088 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 6089 ComputeScalableMaxVF); 6090 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 6091 6092 // Collect all viable vectorization factors larger than the default MaxVF 6093 // (i.e. MaxVectorElementCount). 6094 SmallVector<ElementCount, 8> VFs; 6095 for (ElementCount VS = MaxVectorElementCount * 2; 6096 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 6097 VFs.push_back(VS); 6098 6099 // For each VF calculate its register usage. 6100 auto RUs = calculateRegisterUsage(VFs); 6101 6102 // Select the largest VF which doesn't require more registers than existing 6103 // ones. 
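  // Illustrative example (made-up numbers): with 32 vector registers and
  // collected candidates {8, 16}, if the register-usage estimate reports a
  // maximal local usage of 40 registers at VF=16 and 20 at VF=8, the loop
  // below walks the candidates from the widest down, rejects 16 and settles
  // on MaxVF = 8; if every candidate spilled, MaxVF would simply stay at the
  // default MaxVectorElementCount.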
6104 for (int i = RUs.size() - 1; i >= 0; --i) { 6105 bool Selected = true; 6106 for (auto &pair : RUs[i].MaxLocalUsers) { 6107 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6108 if (pair.second > TargetNumRegisters) 6109 Selected = false; 6110 } 6111 if (Selected) { 6112 MaxVF = VFs[i]; 6113 break; 6114 } 6115 } 6116 if (ElementCount MinVF = 6117 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 6118 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 6119 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 6120 << ") with target's minimum: " << MinVF << '\n'); 6121 MaxVF = MinVF; 6122 } 6123 } 6124 } 6125 return MaxVF; 6126 } 6127 6128 bool LoopVectorizationCostModel::isMoreProfitable( 6129 const VectorizationFactor &A, const VectorizationFactor &B) const { 6130 InstructionCost CostA = A.Cost; 6131 InstructionCost CostB = B.Cost; 6132 6133 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 6134 6135 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 6136 MaxTripCount) { 6137 // If we are folding the tail and the trip count is a known (possibly small) 6138 // constant, the trip count will be rounded up to an integer number of 6139 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 6140 // which we compare directly. When not folding the tail, the total cost will 6141 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 6142 // approximated with the per-lane cost below instead of using the tripcount 6143 // as here. 6144 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 6145 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 6146 return RTCostA < RTCostB; 6147 } 6148 6149 // Improve estimate for the vector width if it is scalable. 6150 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 6151 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 6152 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) { 6153 if (A.Width.isScalable()) 6154 EstimatedWidthA *= VScale.getValue(); 6155 if (B.Width.isScalable()) 6156 EstimatedWidthB *= VScale.getValue(); 6157 } 6158 6159 // When set to preferred, for now assume vscale may be larger than 1 (or the 6160 // one being tuned for), so that scalable vectorization is slightly favorable 6161 // over fixed-width vectorization. 
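  // Worked example of the final comparison below (illustrative numbers):
  // for A = {vscale x 2, Cost 8} and B = {fixed 4, Cost 12} with a tuning
  // value of vscale = 2, EstimatedWidthA = 4 and EstimatedWidthB = 4, so
  //   CostA * EstimatedWidthB = 32  <  CostB * EstimatedWidthA = 48
  // and A is considered more profitable; the cross-multiplication is just
  // CostA / WidthA < CostB / WidthB without the division.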
6162 if (Hints->isScalableVectorizationPreferred()) 6163 if (A.Width.isScalable() && !B.Width.isScalable()) 6164 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 6165 6166 // To avoid the need for FP division: 6167 // (CostA / A.Width) < (CostB / B.Width) 6168 // <=> (CostA * B.Width) < (CostB * A.Width) 6169 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 6170 } 6171 6172 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 6173 const ElementCountSet &VFCandidates) { 6174 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 6175 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 6176 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 6177 assert(VFCandidates.count(ElementCount::getFixed(1)) && 6178 "Expected Scalar VF to be a candidate"); 6179 6180 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 6181 VectorizationFactor ChosenFactor = ScalarCost; 6182 6183 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6184 if (ForceVectorization && VFCandidates.size() > 1) { 6185 // Ignore scalar width, because the user explicitly wants vectorization. 6186 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 6187 // evaluation. 6188 ChosenFactor.Cost = InstructionCost::getMax(); 6189 } 6190 6191 SmallVector<InstructionVFPair> InvalidCosts; 6192 for (const auto &i : VFCandidates) { 6193 // The cost for scalar VF=1 is already calculated, so ignore it. 6194 if (i.isScalar()) 6195 continue; 6196 6197 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 6198 VectorizationFactor Candidate(i, C.first); 6199 6200 #ifndef NDEBUG 6201 unsigned AssumedMinimumVscale = 1; 6202 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) 6203 AssumedMinimumVscale = VScale.getValue(); 6204 unsigned Width = 6205 Candidate.Width.isScalable() 6206 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 6207 : Candidate.Width.getFixedValue(); 6208 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 6209 << " costs: " << (Candidate.Cost / Width)); 6210 if (i.isScalable()) 6211 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 6212 << AssumedMinimumVscale << ")"); 6213 LLVM_DEBUG(dbgs() << ".\n"); 6214 #endif 6215 6216 if (!C.second && !ForceVectorization) { 6217 LLVM_DEBUG( 6218 dbgs() << "LV: Not considering vector loop of width " << i 6219 << " because it will not generate any vector instructions.\n"); 6220 continue; 6221 } 6222 6223 // If profitable add it to ProfitableVF list. 6224 if (isMoreProfitable(Candidate, ScalarCost)) 6225 ProfitableVFs.push_back(Candidate); 6226 6227 if (isMoreProfitable(Candidate, ChosenFactor)) 6228 ChosenFactor = Candidate; 6229 } 6230 6231 // Emit a report of VFs with invalid costs in the loop. 6232 if (!InvalidCosts.empty()) { 6233 // Group the remarks per instruction, keeping the instruction order from 6234 // InvalidCosts. 6235 std::map<Instruction *, unsigned> Numbering; 6236 unsigned I = 0; 6237 for (auto &Pair : InvalidCosts) 6238 if (!Numbering.count(Pair.first)) 6239 Numbering[Pair.first] = I++; 6240 6241 // Sort the list, first on instruction(number) then on VF. 
6242 llvm::sort(InvalidCosts, 6243 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 6244 if (Numbering[A.first] != Numbering[B.first]) 6245 return Numbering[A.first] < Numbering[B.first]; 6246 ElementCountComparator ECC; 6247 return ECC(A.second, B.second); 6248 }); 6249 6250 // For a list of ordered instruction-vf pairs: 6251 // [(load, vf1), (load, vf2), (store, vf1)] 6252 // Group the instructions together to emit separate remarks for: 6253 // load (vf1, vf2) 6254 // store (vf1) 6255 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 6256 auto Subset = ArrayRef<InstructionVFPair>(); 6257 do { 6258 if (Subset.empty()) 6259 Subset = Tail.take_front(1); 6260 6261 Instruction *I = Subset.front().first; 6262 6263 // If the next instruction is different, or if there are no other pairs, 6264 // emit a remark for the collated subset. e.g. 6265 // [(load, vf1), (load, vf2))] 6266 // to emit: 6267 // remark: invalid costs for 'load' at VF=(vf, vf2) 6268 if (Subset == Tail || Tail[Subset.size()].first != I) { 6269 std::string OutString; 6270 raw_string_ostream OS(OutString); 6271 assert(!Subset.empty() && "Unexpected empty range"); 6272 OS << "Instruction with invalid costs prevented vectorization at VF=("; 6273 for (auto &Pair : Subset) 6274 OS << (Pair.second == Subset.front().second ? "" : ", ") 6275 << Pair.second; 6276 OS << "):"; 6277 if (auto *CI = dyn_cast<CallInst>(I)) 6278 OS << " call to " << CI->getCalledFunction()->getName(); 6279 else 6280 OS << " " << I->getOpcodeName(); 6281 OS.flush(); 6282 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 6283 Tail = Tail.drop_front(Subset.size()); 6284 Subset = {}; 6285 } else 6286 // Grow the subset by one element 6287 Subset = Tail.take_front(Subset.size() + 1); 6288 } while (!Tail.empty()); 6289 } 6290 6291 if (!EnableCondStoresVectorization && NumPredStores) { 6292 reportVectorizationFailure("There are conditional stores.", 6293 "store that is conditionally executed prevents vectorization", 6294 "ConditionalStore", ORE, TheLoop); 6295 ChosenFactor = ScalarCost; 6296 } 6297 6298 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 6299 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 6300 << "LV: Vectorization seems to be not beneficial, " 6301 << "but was forced by a user.\n"); 6302 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 6303 return ChosenFactor; 6304 } 6305 6306 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 6307 const Loop &L, ElementCount VF) const { 6308 // Cross iteration phis such as reductions need special handling and are 6309 // currently unsupported. 6310 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 6311 return Legal->isFirstOrderRecurrence(&Phi) || 6312 Legal->isReductionVariable(&Phi); 6313 })) 6314 return false; 6315 6316 // Phis with uses outside of the loop require special handling and are 6317 // currently unsupported. 6318 for (auto &Entry : Legal->getInductionVars()) { 6319 // Look for uses of the value of the induction at the last iteration. 6320 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 6321 for (User *U : PostInc->users()) 6322 if (!L.contains(cast<Instruction>(U))) 6323 return false; 6324 // Look for uses of penultimate value of the induction. 6325 for (User *U : Entry.first->users()) 6326 if (!L.contains(cast<Instruction>(U))) 6327 return false; 6328 } 6329 6330 // Induction variables that are widened require special handling that is 6331 // currently not supported. 
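  // For example (illustrative): an induction used only to form the addresses
  // of consecutive accesses is scalar after vectorization and does not block
  // epilogue vectorization, whereas in
  //   for (i = 0; i < n; ++i) A[i] = i;
  // the stored value requires a widened induction vector, so the loop is
  // rejected as a candidate by the check below.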
6332 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 6333 return !(this->isScalarAfterVectorization(Entry.first, VF) || 6334 this->isProfitableToScalarize(Entry.first, VF)); 6335 })) 6336 return false; 6337 6338 // Epilogue vectorization code has not been auditted to ensure it handles 6339 // non-latch exits properly. It may be fine, but it needs auditted and 6340 // tested. 6341 if (L.getExitingBlock() != L.getLoopLatch()) 6342 return false; 6343 6344 return true; 6345 } 6346 6347 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 6348 const ElementCount VF) const { 6349 // FIXME: We need a much better cost-model to take different parameters such 6350 // as register pressure, code size increase and cost of extra branches into 6351 // account. For now we apply a very crude heuristic and only consider loops 6352 // with vectorization factors larger than a certain value. 6353 // We also consider epilogue vectorization unprofitable for targets that don't 6354 // consider interleaving beneficial (eg. MVE). 6355 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 6356 return false; 6357 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 6358 return true; 6359 return false; 6360 } 6361 6362 VectorizationFactor 6363 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 6364 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 6365 VectorizationFactor Result = VectorizationFactor::Disabled(); 6366 if (!EnableEpilogueVectorization) { 6367 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 6368 return Result; 6369 } 6370 6371 if (!isScalarEpilogueAllowed()) { 6372 LLVM_DEBUG( 6373 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 6374 "allowed.\n";); 6375 return Result; 6376 } 6377 6378 // Not really a cost consideration, but check for unsupported cases here to 6379 // simplify the logic. 6380 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6381 LLVM_DEBUG( 6382 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6383 "not a supported candidate.\n";); 6384 return Result; 6385 } 6386 6387 if (EpilogueVectorizationForceVF > 1) { 6388 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6389 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 6390 if (LVP.hasPlanWithVF(ForcedEC)) 6391 return {ForcedEC, 0}; 6392 else { 6393 LLVM_DEBUG( 6394 dbgs() 6395 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6396 return Result; 6397 } 6398 } 6399 6400 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6401 TheLoop->getHeader()->getParent()->hasMinSize()) { 6402 LLVM_DEBUG( 6403 dbgs() 6404 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6405 return Result; 6406 } 6407 6408 auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 6409 if (MainLoopVF.isScalable()) 6410 LLVM_DEBUG( 6411 dbgs() << "LEV: Epilogue vectorization using scalable vectors not " 6412 "yet supported. 
Converting to fixed-width (VF=" 6413 << FixedMainLoopVF << ") instead\n"); 6414 6415 if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) { 6416 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 6417 "this loop\n"); 6418 return Result; 6419 } 6420 6421 for (auto &NextVF : ProfitableVFs) 6422 if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) && 6423 (Result.Width.getFixedValue() == 1 || 6424 isMoreProfitable(NextVF, Result)) && 6425 LVP.hasPlanWithVF(NextVF.Width)) 6426 Result = NextVF; 6427 6428 if (Result != VectorizationFactor::Disabled()) 6429 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6430 << Result.Width.getFixedValue() << "\n";); 6431 return Result; 6432 } 6433 6434 std::pair<unsigned, unsigned> 6435 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6436 unsigned MinWidth = -1U; 6437 unsigned MaxWidth = 8; 6438 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6439 for (Type *T : ElementTypesInLoop) { 6440 MinWidth = std::min<unsigned>( 6441 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6442 MaxWidth = std::max<unsigned>( 6443 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6444 } 6445 return {MinWidth, MaxWidth}; 6446 } 6447 6448 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6449 ElementTypesInLoop.clear(); 6450 // For each block. 6451 for (BasicBlock *BB : TheLoop->blocks()) { 6452 // For each instruction in the loop. 6453 for (Instruction &I : BB->instructionsWithoutDebug()) { 6454 Type *T = I.getType(); 6455 6456 // Skip ignored values. 6457 if (ValuesToIgnore.count(&I)) 6458 continue; 6459 6460 // Only examine Loads, Stores and PHINodes. 6461 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6462 continue; 6463 6464 // Examine PHI nodes that are reduction variables. Update the type to 6465 // account for the recurrence type. 6466 if (auto *PN = dyn_cast<PHINode>(&I)) { 6467 if (!Legal->isReductionVariable(PN)) 6468 continue; 6469 const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; 6470 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6471 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6472 RdxDesc.getRecurrenceType(), 6473 TargetTransformInfo::ReductionFlags())) 6474 continue; 6475 T = RdxDesc.getRecurrenceType(); 6476 } 6477 6478 // Examine the stored values. 6479 if (auto *ST = dyn_cast<StoreInst>(&I)) 6480 T = ST->getValueOperand()->getType(); 6481 6482 // Ignore loaded pointer types and stored pointer types that are not 6483 // vectorizable. 6484 // 6485 // FIXME: The check here attempts to predict whether a load or store will 6486 // be vectorized. We only know this for certain after a VF has 6487 // been selected. Here, we assume that if an access can be 6488 // vectorized, it will be. We should also look at extending this 6489 // optimization to non-pointer types. 6490 // 6491 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6492 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6493 continue; 6494 6495 ElementTypesInLoop.insert(T); 6496 } 6497 } 6498 } 6499 6500 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6501 unsigned LoopCost) { 6502 // -- The interleave heuristics -- 6503 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6504 // There are many micro-architectural considerations that we can't predict 6505 // at this level. 
For example, frontend pressure (on decode or fetch) due to 6506 // code size, or the number and capabilities of the execution ports. 6507 // 6508 // We use the following heuristics to select the interleave count: 6509 // 1. If the code has reductions, then we interleave to break the cross 6510 // iteration dependency. 6511 // 2. If the loop is really small, then we interleave to reduce the loop 6512 // overhead. 6513 // 3. We don't interleave if we think that we will spill registers to memory 6514 // due to the increased register pressure. 6515 6516 if (!isScalarEpilogueAllowed()) 6517 return 1; 6518 6519 // We used the distance for the interleave count. 6520 if (Legal->getMaxSafeDepDistBytes() != -1U) 6521 return 1; 6522 6523 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6524 const bool HasReductions = !Legal->getReductionVars().empty(); 6525 // Do not interleave loops with a relatively small known or estimated trip 6526 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6527 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6528 // because with the above conditions interleaving can expose ILP and break 6529 // cross iteration dependences for reductions. 6530 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6531 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6532 return 1; 6533 6534 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6535 // We divide by these constants so assume that we have at least one 6536 // instruction that uses at least one register. 6537 for (auto& pair : R.MaxLocalUsers) { 6538 pair.second = std::max(pair.second, 1U); 6539 } 6540 6541 // We calculate the interleave count using the following formula. 6542 // Subtract the number of loop invariants from the number of available 6543 // registers. These registers are used by all of the interleaved instances. 6544 // Next, divide the remaining registers by the number of registers that is 6545 // required by the loop, in order to estimate how many parallel instances 6546 // fit without causing spills. All of this is rounded down if necessary to be 6547 // a power of two. We want power of two interleave count to simplify any 6548 // addressing operations or alignment considerations. 6549 // We also want power of two interleave counts to ensure that the induction 6550 // variable of the vector loop wraps to zero, when tail is folded by masking; 6551 // this currently happens when OptForSize, in which case IC is set to 1 above. 6552 unsigned IC = UINT_MAX; 6553 6554 for (auto& pair : R.MaxLocalUsers) { 6555 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6556 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6557 << " registers of " 6558 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6559 if (VF.isScalar()) { 6560 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6561 TargetNumRegisters = ForceTargetNumScalarRegs; 6562 } else { 6563 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6564 TargetNumRegisters = ForceTargetNumVectorRegs; 6565 } 6566 unsigned MaxLocalUsers = pair.second; 6567 unsigned LoopInvariantRegs = 0; 6568 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6569 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6570 6571 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6572 // Don't count the induction variable as interleaved. 
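    // Worked example (made-up register counts): with 32 registers in this
    // class, 2 loop-invariant values and 6 registers live at once, the
    // formula above yields PowerOf2Floor((32 - 2) / 6) = 4; with the
    // induction-variable adjustment below it is
    // PowerOf2Floor((32 - 2 - 1) / (6 - 1)) = 4 as well.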
6573 if (EnableIndVarRegisterHeur) { 6574 TmpIC = 6575 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6576 std::max(1U, (MaxLocalUsers - 1))); 6577 } 6578 6579 IC = std::min(IC, TmpIC); 6580 } 6581 6582 // Clamp the interleave ranges to reasonable counts. 6583 unsigned MaxInterleaveCount = 6584 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6585 6586 // Check if the user has overridden the max. 6587 if (VF.isScalar()) { 6588 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6589 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6590 } else { 6591 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6592 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6593 } 6594 6595 // If trip count is known or estimated compile time constant, limit the 6596 // interleave count to be less than the trip count divided by VF, provided it 6597 // is at least 1. 6598 // 6599 // For scalable vectors we can't know if interleaving is beneficial. It may 6600 // not be beneficial for small loops if none of the lanes in the second vector 6601 // iterations is enabled. However, for larger loops, there is likely to be a 6602 // similar benefit as for fixed-width vectors. For now, we choose to leave 6603 // the InterleaveCount as if vscale is '1', although if some information about 6604 // the vector is known (e.g. min vector size), we can make a better decision. 6605 if (BestKnownTC) { 6606 MaxInterleaveCount = 6607 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6608 // Make sure MaxInterleaveCount is greater than 0. 6609 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6610 } 6611 6612 assert(MaxInterleaveCount > 0 && 6613 "Maximum interleave count must be greater than 0"); 6614 6615 // Clamp the calculated IC to be between the 1 and the max interleave count 6616 // that the target and trip count allows. 6617 if (IC > MaxInterleaveCount) 6618 IC = MaxInterleaveCount; 6619 else 6620 // Make sure IC is greater than 0. 6621 IC = std::max(1u, IC); 6622 6623 assert(IC > 0 && "Interleave count must be greater than 0."); 6624 6625 // If we did not calculate the cost for VF (because the user selected the VF) 6626 // then we calculate the cost of VF here. 6627 if (LoopCost == 0) { 6628 InstructionCost C = expectedCost(VF).first; 6629 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6630 LoopCost = *C.getValue(); 6631 } 6632 6633 assert(LoopCost && "Non-zero loop cost expected"); 6634 6635 // Interleave if we vectorized this loop and there is a reduction that could 6636 // benefit from interleaving. 6637 if (VF.isVector() && HasReductions) { 6638 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6639 return IC; 6640 } 6641 6642 // Note that if we've already vectorized the loop we will have done the 6643 // runtime check and so interleaving won't require further checks. 6644 bool InterleavingRequiresRuntimePointerCheck = 6645 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6646 6647 // We want to interleave small loops in order to reduce the loop overhead and 6648 // potentially expose ILP opportunities. 
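  // Rough sketch of the small-loop path below (the threshold here is
  // illustrative, not the actual SmallLoopCost value): if the body costs 5
  // and the threshold is 20, SmallIC = min(IC, PowerOf2Floor(20 / 5)) = 4,
  // which keeps the assumed per-iteration overhead of 1 at roughly 5% of the
  // interleaved loop cost, matching the comment below.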
6649 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6650 << "LV: IC is " << IC << '\n' 6651 << "LV: VF is " << VF << '\n'); 6652 const bool AggressivelyInterleaveReductions = 6653 TTI.enableAggressiveInterleaving(HasReductions); 6654 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6655 // We assume that the cost overhead is 1 and we use the cost model 6656 // to estimate the cost of the loop and interleave until the cost of the 6657 // loop overhead is about 5% of the cost of the loop. 6658 unsigned SmallIC = 6659 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6660 6661 // Interleave until store/load ports (estimated by max interleave count) are 6662 // saturated. 6663 unsigned NumStores = Legal->getNumStores(); 6664 unsigned NumLoads = Legal->getNumLoads(); 6665 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6666 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6667 6668 // There is little point in interleaving for reductions containing selects 6669 // and compares when VF=1 since it may just create more overhead than it's 6670 // worth for loops with small trip counts. This is because we still have to 6671 // do the final reduction after the loop. 6672 bool HasSelectCmpReductions = 6673 HasReductions && 6674 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6675 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6676 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 6677 RdxDesc.getRecurrenceKind()); 6678 }); 6679 if (HasSelectCmpReductions) { 6680 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 6681 return 1; 6682 } 6683 6684 // If we have a scalar reduction (vector reductions are already dealt with 6685 // by this point), we can increase the critical path length if the loop 6686 // we're interleaving is inside another loop. For tree-wise reductions 6687 // set the limit to 2, and for ordered reductions it's best to disable 6688 // interleaving entirely. 6689 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6690 bool HasOrderedReductions = 6691 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6692 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6693 return RdxDesc.isOrdered(); 6694 }); 6695 if (HasOrderedReductions) { 6696 LLVM_DEBUG( 6697 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6698 return 1; 6699 } 6700 6701 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6702 SmallIC = std::min(SmallIC, F); 6703 StoresIC = std::min(StoresIC, F); 6704 LoadsIC = std::min(LoadsIC, F); 6705 } 6706 6707 if (EnableLoadStoreRuntimeInterleave && 6708 std::max(StoresIC, LoadsIC) > SmallIC) { 6709 LLVM_DEBUG( 6710 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6711 return std::max(StoresIC, LoadsIC); 6712 } 6713 6714 // If there are scalar reductions and TTI has enabled aggressive 6715 // interleaving for reductions, we will interleave to expose ILP. 6716 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6717 AggressivelyInterleaveReductions) { 6718 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6719 // Interleave no less than SmallIC but not as aggressive as the normal IC 6720 // to satisfy the rare situation when resources are too limited. 
6721 return std::max(IC / 2, SmallIC);
6722 } else {
6723 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6724 return SmallIC;
6725 }
6726 }
6727
6728 // Interleave if this is a large loop (small loops are already dealt with by
6729 // this point) that could benefit from interleaving.
6730 if (AggressivelyInterleaveReductions) {
6731 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6732 return IC;
6733 }
6734
6735 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6736 return 1;
6737 }
6738
6739 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6740 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6741 // This function calculates the register usage by measuring the highest number
6742 // of values that are alive at a single location. Obviously, this is a very
6743 // rough estimation. We scan the loop in a topological order and
6744 // assign a number to each instruction. We use RPO to ensure that defs are
6745 // met before their users. We assume that each instruction that has in-loop
6746 // users starts an interval. We record every time that an in-loop value is
6747 // used, so we have a list of the first and last occurrences of each
6748 // instruction. Next, we transpose this data structure into a multi map that
6749 // holds the list of intervals that *end* at a specific location. This multi
6750 // map allows us to perform a linear search. We scan the instructions linearly
6751 // and record each time that a new interval starts, by placing it in a set.
6752 // If we find this value in the multi-map then we remove it from the set.
6753 // The max register usage is the maximum size of the set.
6754 // We also search for instructions that are defined outside the loop, but are
6755 // used inside the loop. We need this number separately from the max-interval
6756 // usage number because when we unroll, loop-invariant values do not take
6757 // more registers.
6758 LoopBlocksDFS DFS(TheLoop);
6759 DFS.perform(LI);
6760
6761 RegisterUsage RU;
6762
6763 // Each 'key' in the map opens a new interval. The values
6764 // of the map are the index of the 'last seen' usage of the
6765 // instruction that is the key.
6766 using IntervalMap = DenseMap<Instruction *, unsigned>;
6767
6768 // Maps instruction to its index.
6769 SmallVector<Instruction *, 64> IdxToInstr;
6770 // Marks the end of each interval.
6771 IntervalMap EndPoint;
6772 // Saves the list of instructions that are used in the loop.
6773 SmallPtrSet<Instruction *, 8> Ends;
6774 // Saves the list of values that are used in the loop but are
6775 // defined outside the loop, such as arguments and constants.
6776 SmallPtrSet<Value *, 8> LoopInvariants;
6777
6778 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6779 for (Instruction &I : BB->instructionsWithoutDebug()) {
6780 IdxToInstr.push_back(&I);
6781
6782 // Save the end location of each USE.
6783 for (Value *U : I.operands()) {
6784 auto *Instr = dyn_cast<Instruction>(U);
6785
6786 // Ignore non-instruction values such as arguments, constants, etc.
6787 if (!Instr)
6788 continue;
6789
6790 // If this instruction is outside the loop then record it and continue.
6791 if (!TheLoop->contains(Instr)) {
6792 LoopInvariants.insert(Instr);
6793 continue;
6794 }
6795
6796 // Overwrite previous end points.
6797 EndPoint[Instr] = IdxToInstr.size();
6798 Ends.insert(Instr);
6799 }
6800 }
6801 }
6802
6803 // Saves the list of intervals that end with the index in 'key'.
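// For example (hypothetical values): if EndPoint were {%a -> 5, %b -> 5,
// %c -> 7}, the transposed map built below would be {5 -> [%a, %b],
// 7 -> [%c]}, so the linear scan can close the intervals of %a and %b as
// soon as it reaches index 5.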
6804 using InstrList = SmallVector<Instruction *, 2>; 6805 DenseMap<unsigned, InstrList> TransposeEnds; 6806 6807 // Transpose the EndPoints to a list of values that end at each index. 6808 for (auto &Interval : EndPoint) 6809 TransposeEnds[Interval.second].push_back(Interval.first); 6810 6811 SmallPtrSet<Instruction *, 8> OpenIntervals; 6812 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6813 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6814 6815 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6816 6817 // A lambda that gets the register usage for the given type and VF. 6818 const auto &TTICapture = TTI; 6819 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6820 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6821 return 0; 6822 InstructionCost::CostType RegUsage = 6823 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6824 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6825 "Nonsensical values for register usage."); 6826 return RegUsage; 6827 }; 6828 6829 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6830 Instruction *I = IdxToInstr[i]; 6831 6832 // Remove all of the instructions that end at this location. 6833 InstrList &List = TransposeEnds[i]; 6834 for (Instruction *ToRemove : List) 6835 OpenIntervals.erase(ToRemove); 6836 6837 // Ignore instructions that are never used within the loop. 6838 if (!Ends.count(I)) 6839 continue; 6840 6841 // Skip ignored values. 6842 if (ValuesToIgnore.count(I)) 6843 continue; 6844 6845 // For each VF find the maximum usage of registers. 6846 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6847 // Count the number of live intervals. 6848 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6849 6850 if (VFs[j].isScalar()) { 6851 for (auto Inst : OpenIntervals) { 6852 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6853 if (RegUsage.find(ClassID) == RegUsage.end()) 6854 RegUsage[ClassID] = 1; 6855 else 6856 RegUsage[ClassID] += 1; 6857 } 6858 } else { 6859 collectUniformsAndScalars(VFs[j]); 6860 for (auto Inst : OpenIntervals) { 6861 // Skip ignored values for VF > 1. 6862 if (VecValuesToIgnore.count(Inst)) 6863 continue; 6864 if (isScalarAfterVectorization(Inst, VFs[j])) { 6865 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6866 if (RegUsage.find(ClassID) == RegUsage.end()) 6867 RegUsage[ClassID] = 1; 6868 else 6869 RegUsage[ClassID] += 1; 6870 } else { 6871 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6872 if (RegUsage.find(ClassID) == RegUsage.end()) 6873 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6874 else 6875 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6876 } 6877 } 6878 } 6879 6880 for (auto& pair : RegUsage) { 6881 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6882 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6883 else 6884 MaxUsages[j][pair.first] = pair.second; 6885 } 6886 } 6887 6888 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6889 << OpenIntervals.size() << '\n'); 6890 6891 // Add the current instruction to the list of open intervals. 6892 OpenIntervals.insert(I); 6893 } 6894 6895 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6896 SmallMapVector<unsigned, unsigned, 4> Invariant; 6897 6898 for (auto Inst : LoopInvariants) { 6899 unsigned Usage = 6900 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]);
6901 unsigned ClassID =
6902 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6903 if (Invariant.find(ClassID) == Invariant.end())
6904 Invariant[ClassID] = Usage;
6905 else
6906 Invariant[ClassID] += Usage;
6907 }
6908
6909 LLVM_DEBUG({
6910 dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6911 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6912 << " item\n";
6913 for (const auto &pair : MaxUsages[i]) {
6914 dbgs() << "LV(REG): RegisterClass: "
6915 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6916 << " registers\n";
6917 }
6918 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6919 << " item\n";
6920 for (const auto &pair : Invariant) {
6921 dbgs() << "LV(REG): RegisterClass: "
6922 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6923 << " registers\n";
6924 }
6925 });
6926
6927 RU.LoopInvariantRegs = Invariant;
6928 RU.MaxLocalUsers = MaxUsages[i];
6929 RUs[i] = RU;
6930 }
6931
6932 return RUs;
6933 }
6934
6935 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6936 // TODO: Cost model for emulated masked load/store is completely
6937 // broken. This hack guides the cost model to use an artificially
6938 // high enough value to practically disable vectorization with such
6939 // operations, except where the previously deployed legality hack allowed
6940 // using very low cost values. This is to avoid regressions coming simply
6941 // from moving the "masked load/store" check from legality to the cost model.
6942 // Masked Load/Gather emulation was previously never allowed.
6943 // Emulation of a limited number of Masked Store/Scatter operations was allowed.
6944 assert(isPredicatedInst(I) &&
6945 "Expecting a scalar emulated instruction");
6946 return isa<LoadInst>(I) ||
6947 (isa<StoreInst>(I) &&
6948 NumPredStores > NumberOfStoresToPredicate);
6949 }
6950
6951 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6952 // If we aren't vectorizing the loop, or if we've already collected the
6953 // instructions to scalarize, there's nothing to do. Collection may already
6954 // have occurred if we have a user-selected VF and are now computing the
6955 // expected cost for interleaving.
6956 if (VF.isScalar() || VF.isZero() ||
6957 InstsToScalarize.find(VF) != InstsToScalarize.end())
6958 return;
6959
6960 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6961 // not profitable to scalarize any instructions, the presence of VF in the
6962 // map will indicate that we've analyzed it already.
6963 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6964
6965 // Find all the instructions that are scalar with predication in the loop and
6966 // determine if it would be better to not if-convert the blocks they are in.
6967 // If so, we also record the instructions to scalarize.
6968 for (BasicBlock *BB : TheLoop->blocks()) {
6969 if (!blockNeedsPredicationForAnyReason(BB))
6970 continue;
6971 for (Instruction &I : *BB)
6972 if (isScalarWithPredication(&I)) {
6973 ScalarCostsTy ScalarCosts;
6974 // Do not apply discount if scalable, because that would lead to
6975 // invalid scalarization costs.
6976 // Do not apply discount logic if hacked cost is needed
6977 // for emulated masked memrefs.
6978 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
6979 computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6980 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6981 // Remember that BB will remain after vectorization.
6982 PredicatedBBsAfterVectorization.insert(BB); 6983 } 6984 } 6985 } 6986 6987 int LoopVectorizationCostModel::computePredInstDiscount( 6988 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6989 assert(!isUniformAfterVectorization(PredInst, VF) && 6990 "Instruction marked uniform-after-vectorization will be predicated"); 6991 6992 // Initialize the discount to zero, meaning that the scalar version and the 6993 // vector version cost the same. 6994 InstructionCost Discount = 0; 6995 6996 // Holds instructions to analyze. The instructions we visit are mapped in 6997 // ScalarCosts. Those instructions are the ones that would be scalarized if 6998 // we find that the scalar version costs less. 6999 SmallVector<Instruction *, 8> Worklist; 7000 7001 // Returns true if the given instruction can be scalarized. 7002 auto canBeScalarized = [&](Instruction *I) -> bool { 7003 // We only attempt to scalarize instructions forming a single-use chain 7004 // from the original predicated block that would otherwise be vectorized. 7005 // Although not strictly necessary, we give up on instructions we know will 7006 // already be scalar to avoid traversing chains that are unlikely to be 7007 // beneficial. 7008 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 7009 isScalarAfterVectorization(I, VF)) 7010 return false; 7011 7012 // If the instruction is scalar with predication, it will be analyzed 7013 // separately. We ignore it within the context of PredInst. 7014 if (isScalarWithPredication(I)) 7015 return false; 7016 7017 // If any of the instruction's operands are uniform after vectorization, 7018 // the instruction cannot be scalarized. This prevents, for example, a 7019 // masked load from being scalarized. 7020 // 7021 // We assume we will only emit a value for lane zero of an instruction 7022 // marked uniform after vectorization, rather than VF identical values. 7023 // Thus, if we scalarize an instruction that uses a uniform, we would 7024 // create uses of values corresponding to the lanes we aren't emitting code 7025 // for. This behavior can be changed by allowing getScalarValue to clone 7026 // the lane zero values for uniforms rather than asserting. 7027 for (Use &U : I->operands()) 7028 if (auto *J = dyn_cast<Instruction>(U.get())) 7029 if (isUniformAfterVectorization(J, VF)) 7030 return false; 7031 7032 // Otherwise, we can scalarize the instruction. 7033 return true; 7034 }; 7035 7036 // Compute the expected cost discount from scalarizing the entire expression 7037 // feeding the predicated instruction. We currently only consider expressions 7038 // that are single-use instruction chains. 7039 Worklist.push_back(PredInst); 7040 while (!Worklist.empty()) { 7041 Instruction *I = Worklist.pop_back_val(); 7042 7043 // If we've already analyzed the instruction, there's nothing to do. 7044 if (ScalarCosts.find(I) != ScalarCosts.end()) 7045 continue; 7046 7047 // Compute the cost of the vector instruction. Note that this cost already 7048 // includes the scalarization overhead of the predicated instruction. 7049 InstructionCost VectorCost = getInstructionCost(I, VF).first; 7050 7051 // Compute the cost of the scalarized instruction. This cost is the cost of 7052 // the instruction as if it wasn't if-converted and instead remained in the 7053 // predicated block. We will scale this cost by block probability after 7054 // computing the scalarization overhead. 
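// For example (hypothetical values): with VF = 4 and a per-lane scalar cost
// of 1, the scalar cost below starts at 4, before the insert/extract and phi
// overhead is added and before the block-probability scaling at the end.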
7055 InstructionCost ScalarCost = 7056 VF.getFixedValue() * 7057 getInstructionCost(I, ElementCount::getFixed(1)).first; 7058 7059 // Compute the scalarization overhead of needed insertelement instructions 7060 // and phi nodes. 7061 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 7062 ScalarCost += TTI.getScalarizationOverhead( 7063 cast<VectorType>(ToVectorTy(I->getType(), VF)), 7064 APInt::getAllOnes(VF.getFixedValue()), true, false); 7065 ScalarCost += 7066 VF.getFixedValue() * 7067 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 7068 } 7069 7070 // Compute the scalarization overhead of needed extractelement 7071 // instructions. For each of the instruction's operands, if the operand can 7072 // be scalarized, add it to the worklist; otherwise, account for the 7073 // overhead. 7074 for (Use &U : I->operands()) 7075 if (auto *J = dyn_cast<Instruction>(U.get())) { 7076 assert(VectorType::isValidElementType(J->getType()) && 7077 "Instruction has non-scalar type"); 7078 if (canBeScalarized(J)) 7079 Worklist.push_back(J); 7080 else if (needsExtract(J, VF)) { 7081 ScalarCost += TTI.getScalarizationOverhead( 7082 cast<VectorType>(ToVectorTy(J->getType(), VF)), 7083 APInt::getAllOnes(VF.getFixedValue()), false, true); 7084 } 7085 } 7086 7087 // Scale the total scalar cost by block probability. 7088 ScalarCost /= getReciprocalPredBlockProb(); 7089 7090 // Compute the discount. A non-negative discount means the vector version 7091 // of the instruction costs more, and scalarizing would be beneficial. 7092 Discount += VectorCost - ScalarCost; 7093 ScalarCosts[I] = ScalarCost; 7094 } 7095 7096 return *Discount.getValue(); 7097 } 7098 7099 LoopVectorizationCostModel::VectorizationCostTy 7100 LoopVectorizationCostModel::expectedCost( 7101 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 7102 VectorizationCostTy Cost; 7103 7104 // For each block. 7105 for (BasicBlock *BB : TheLoop->blocks()) { 7106 VectorizationCostTy BlockCost; 7107 7108 // For each instruction in the old loop. 7109 for (Instruction &I : BB->instructionsWithoutDebug()) { 7110 // Skip ignored values. 7111 if (ValuesToIgnore.count(&I) || 7112 (VF.isVector() && VecValuesToIgnore.count(&I))) 7113 continue; 7114 7115 VectorizationCostTy C = getInstructionCost(&I, VF); 7116 7117 // Check if we should override the cost. 7118 if (C.first.isValid() && 7119 ForceTargetInstructionCost.getNumOccurrences() > 0) 7120 C.first = InstructionCost(ForceTargetInstructionCost); 7121 7122 // Keep a list of instructions with invalid costs. 7123 if (Invalid && !C.first.isValid()) 7124 Invalid->emplace_back(&I, VF); 7125 7126 BlockCost.first += C.first; 7127 BlockCost.second |= C.second; 7128 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 7129 << " for VF " << VF << " For instruction: " << I 7130 << '\n'); 7131 } 7132 7133 // If we are vectorizing a predicated block, it will have been 7134 // if-converted. This means that the block's instructions (aside from 7135 // stores and instructions that may divide by zero) will now be 7136 // unconditionally executed. For the scalar case, we may not always execute 7137 // the predicated block, if it is an if-else block. Thus, scale the block's 7138 // cost by the probability of executing it. blockNeedsPredication from 7139 // Legal is used so as to not include all blocks in tail folded loops. 
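// For example (assuming getReciprocalPredBlockProb() models a 50% chance of
// executing the block, i.e. returns 2): a predicated block with a raw scalar
// cost of 10 contributes only 5 to the scalar loop cost below.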
7140 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 7141 BlockCost.first /= getReciprocalPredBlockProb(); 7142 7143 Cost.first += BlockCost.first; 7144 Cost.second |= BlockCost.second; 7145 } 7146 7147 return Cost; 7148 } 7149 7150 /// Gets Address Access SCEV after verifying that the access pattern 7151 /// is loop invariant except the induction variable dependence. 7152 /// 7153 /// This SCEV can be sent to the Target in order to estimate the address 7154 /// calculation cost. 7155 static const SCEV *getAddressAccessSCEV( 7156 Value *Ptr, 7157 LoopVectorizationLegality *Legal, 7158 PredicatedScalarEvolution &PSE, 7159 const Loop *TheLoop) { 7160 7161 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 7162 if (!Gep) 7163 return nullptr; 7164 7165 // We are looking for a gep with all loop invariant indices except for one 7166 // which should be an induction variable. 7167 auto SE = PSE.getSE(); 7168 unsigned NumOperands = Gep->getNumOperands(); 7169 for (unsigned i = 1; i < NumOperands; ++i) { 7170 Value *Opd = Gep->getOperand(i); 7171 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 7172 !Legal->isInductionVariable(Opd)) 7173 return nullptr; 7174 } 7175 7176 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 7177 return PSE.getSCEV(Ptr); 7178 } 7179 7180 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 7181 return Legal->hasStride(I->getOperand(0)) || 7182 Legal->hasStride(I->getOperand(1)); 7183 } 7184 7185 InstructionCost 7186 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 7187 ElementCount VF) { 7188 assert(VF.isVector() && 7189 "Scalarization cost of instruction implies vectorization."); 7190 if (VF.isScalable()) 7191 return InstructionCost::getInvalid(); 7192 7193 Type *ValTy = getLoadStoreType(I); 7194 auto SE = PSE.getSE(); 7195 7196 unsigned AS = getLoadStoreAddressSpace(I); 7197 Value *Ptr = getLoadStorePointerOperand(I); 7198 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 7199 7200 // Figure out whether the access is strided and get the stride value 7201 // if it's known in compile time 7202 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 7203 7204 // Get the cost of the scalar memory instruction and address computation. 7205 InstructionCost Cost = 7206 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 7207 7208 // Don't pass *I here, since it is scalar but will actually be part of a 7209 // vectorized loop where the user of it is a vectorized instruction. 7210 const Align Alignment = getLoadStoreAlignment(I); 7211 Cost += VF.getKnownMinValue() * 7212 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 7213 AS, TTI::TCK_RecipThroughput); 7214 7215 // Get the overhead of the extractelement and insertelement instructions 7216 // we might create due to scalarization. 7217 Cost += getScalarizationOverhead(I, VF); 7218 7219 // If we have a predicated load/store, it will need extra i1 extracts and 7220 // conditional branches, but may not be executed for each vector lane. Scale 7221 // the cost by the probability of executing the predicated block. 
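// For example (hypothetical values): with a scalarization cost of 40 and a
// reciprocal block probability of 2, the predicated cost below becomes
// 40 / 2 = 20, after which the i1 extract and branch costs are added
// unscaled.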
7222 if (isPredicatedInst(I)) { 7223 Cost /= getReciprocalPredBlockProb(); 7224 7225 // Add the cost of an i1 extract and a branch 7226 auto *Vec_i1Ty = 7227 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 7228 Cost += TTI.getScalarizationOverhead( 7229 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 7230 /*Insert=*/false, /*Extract=*/true); 7231 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 7232 7233 if (useEmulatedMaskMemRefHack(I)) 7234 // Artificially setting to a high enough value to practically disable 7235 // vectorization with such operations. 7236 Cost = 3000000; 7237 } 7238 7239 return Cost; 7240 } 7241 7242 InstructionCost 7243 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7244 ElementCount VF) { 7245 Type *ValTy = getLoadStoreType(I); 7246 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7247 Value *Ptr = getLoadStorePointerOperand(I); 7248 unsigned AS = getLoadStoreAddressSpace(I); 7249 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 7250 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7251 7252 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7253 "Stride should be 1 or -1 for consecutive memory access"); 7254 const Align Alignment = getLoadStoreAlignment(I); 7255 InstructionCost Cost = 0; 7256 if (Legal->isMaskRequired(I)) 7257 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7258 CostKind); 7259 else 7260 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7261 CostKind, I); 7262 7263 bool Reverse = ConsecutiveStride < 0; 7264 if (Reverse) 7265 Cost += 7266 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7267 return Cost; 7268 } 7269 7270 InstructionCost 7271 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7272 ElementCount VF) { 7273 assert(Legal->isUniformMemOp(*I)); 7274 7275 Type *ValTy = getLoadStoreType(I); 7276 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7277 const Align Alignment = getLoadStoreAlignment(I); 7278 unsigned AS = getLoadStoreAddressSpace(I); 7279 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7280 if (isa<LoadInst>(I)) { 7281 return TTI.getAddressComputationCost(ValTy) + 7282 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 7283 CostKind) + 7284 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7285 } 7286 StoreInst *SI = cast<StoreInst>(I); 7287 7288 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 7289 return TTI.getAddressComputationCost(ValTy) + 7290 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 7291 CostKind) + 7292 (isLoopInvariantStoreValue 7293 ? 
0
7294 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7295 VF.getKnownMinValue() - 1));
7296 }
7297
7298 InstructionCost
7299 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7300 ElementCount VF) {
7301 Type *ValTy = getLoadStoreType(I);
7302 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7303 const Align Alignment = getLoadStoreAlignment(I);
7304 const Value *Ptr = getLoadStorePointerOperand(I);
7305
7306 return TTI.getAddressComputationCost(VectorTy) +
7307 TTI.getGatherScatterOpCost(
7308 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7309 TargetTransformInfo::TCK_RecipThroughput, I);
7310 }
7311
7312 InstructionCost
7313 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7314 ElementCount VF) {
7315 // TODO: Once we have support for interleaving with scalable vectors
7316 // we can calculate the cost properly here.
7317 if (VF.isScalable())
7318 return InstructionCost::getInvalid();
7319
7320 Type *ValTy = getLoadStoreType(I);
7321 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7322 unsigned AS = getLoadStoreAddressSpace(I);
7323
7324 auto Group = getInterleavedAccessGroup(I);
7325 assert(Group && "Fail to get an interleaved access group.");
7326
7327 unsigned InterleaveFactor = Group->getFactor();
7328 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7329
7330 // Holds the indices of existing members in the interleaved group.
7331 SmallVector<unsigned, 4> Indices;
7332 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
7333 if (Group->getMember(IF))
7334 Indices.push_back(IF);
7335
7336 // Calculate the cost of the whole interleaved group.
7337 bool UseMaskForGaps =
7338 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
7339 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
7340 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7341 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7342 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7343
7344 if (Group->isReverse()) {
7345 // TODO: Add support for reversed masked interleaved access.
7346 assert(!Legal->isMaskRequired(I) &&
7347 "Reverse masked interleaved access not supported.");
7348 Cost +=
7349 Group->getNumMembers() *
7350 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7351 }
7352 return Cost;
7353 }
7354
7355 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
7356 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
7357 using namespace llvm::PatternMatch;
7358 // Early exit for no in-loop reductions.
7359 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7360 return None;
7361 auto *VectorTy = cast<VectorType>(Ty);
7362
7363 // We are looking for one of the following patterns and the minimal acceptable cost:
7364 // reduce(mul(ext(A), ext(B))) or
7365 // reduce(mul(A, B)) or
7366 // reduce(ext(A)) or
7367 // reduce(A).
7368 // The basic idea is that we walk down the tree to do that, finding the root
7369 // reduction instruction in InLoopReductionImmediateChains. From there we find
7370 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
7371 // of the components. If the reduction cost is lower, then we return it for the
7372 // reduction instruction and 0 for the other instructions in the pattern. If
7373 // it is not, we return an invalid cost specifying the original cost method
7374 // should be used.
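// For example (illustrative IR, not taken from a test case): given
//   %e = sext i8 %a to i32
//   %m = mul i32 %e, %f
//   %r = add i32 %m, %phi      ; the in-loop reduction update
// a query for the sext walks RetI below from the sext to the mul to the add
// before looking the add up in InLoopReductionImmediateChains.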
7375 Instruction *RetI = I; 7376 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 7377 if (!RetI->hasOneUser()) 7378 return None; 7379 RetI = RetI->user_back(); 7380 } 7381 if (match(RetI, m_Mul(m_Value(), m_Value())) && 7382 RetI->user_back()->getOpcode() == Instruction::Add) { 7383 if (!RetI->hasOneUser()) 7384 return None; 7385 RetI = RetI->user_back(); 7386 } 7387 7388 // Test if the found instruction is a reduction, and if not return an invalid 7389 // cost specifying the parent to use the original cost modelling. 7390 if (!InLoopReductionImmediateChains.count(RetI)) 7391 return None; 7392 7393 // Find the reduction this chain is a part of and calculate the basic cost of 7394 // the reduction on its own. 7395 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7396 Instruction *ReductionPhi = LastChain; 7397 while (!isa<PHINode>(ReductionPhi)) 7398 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7399 7400 const RecurrenceDescriptor &RdxDesc = 7401 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7402 7403 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7404 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 7405 7406 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 7407 // normal fmul instruction to the cost of the fadd reduction. 7408 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 7409 BaseCost += 7410 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 7411 7412 // If we're using ordered reductions then we can just return the base cost 7413 // here, since getArithmeticReductionCost calculates the full ordered 7414 // reduction cost when FP reassociation is not allowed. 7415 if (useOrderedReductions(RdxDesc)) 7416 return BaseCost; 7417 7418 // Get the operand that was not the reduction chain and match it to one of the 7419 // patterns, returning the better cost if it is found. 7420 Instruction *RedOp = RetI->getOperand(1) == LastChain 7421 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7422 : dyn_cast<Instruction>(RetI->getOperand(1)); 7423 7424 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7425 7426 Instruction *Op0, *Op1; 7427 if (RedOp && 7428 match(RedOp, 7429 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 7430 match(Op0, m_ZExtOrSExt(m_Value())) && 7431 Op0->getOpcode() == Op1->getOpcode() && 7432 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7433 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 7434 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 7435 7436 // Matched reduce(ext(mul(ext(A), ext(B))) 7437 // Note that the extend opcodes need to all match, or if A==B they will have 7438 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 7439 // which is equally fine. 
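// For example (hypothetical costs): if each inner extend costs 1, the mul 1,
// the outer extend 1 and the plain reduction (BaseCost) 4, the pattern is
// folded into an extended MLA reduction below only if its cost is less than
// 1 * 2 + 1 + 1 + 4 = 8.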
7440 bool IsUnsigned = isa<ZExtInst>(Op0); 7441 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7442 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 7443 7444 InstructionCost ExtCost = 7445 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 7446 TTI::CastContextHint::None, CostKind, Op0); 7447 InstructionCost MulCost = 7448 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 7449 InstructionCost Ext2Cost = 7450 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 7451 TTI::CastContextHint::None, CostKind, RedOp); 7452 7453 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7454 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7455 CostKind); 7456 7457 if (RedCost.isValid() && 7458 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 7459 return I == RetI ? RedCost : 0; 7460 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7461 !TheLoop->isLoopInvariant(RedOp)) { 7462 // Matched reduce(ext(A)) 7463 bool IsUnsigned = isa<ZExtInst>(RedOp); 7464 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7465 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7466 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7467 CostKind); 7468 7469 InstructionCost ExtCost = 7470 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7471 TTI::CastContextHint::None, CostKind, RedOp); 7472 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7473 return I == RetI ? RedCost : 0; 7474 } else if (RedOp && 7475 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7476 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7477 Op0->getOpcode() == Op1->getOpcode() && 7478 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7479 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7480 bool IsUnsigned = isa<ZExtInst>(Op0); 7481 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7482 // Matched reduce(mul(ext, ext)) 7483 InstructionCost ExtCost = 7484 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7485 TTI::CastContextHint::None, CostKind, Op0); 7486 InstructionCost MulCost = 7487 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7488 7489 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7490 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7491 CostKind); 7492 7493 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7494 return I == RetI ? RedCost : 0; 7495 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 7496 // Matched reduce(mul()) 7497 InstructionCost MulCost = 7498 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7499 7500 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7501 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7502 CostKind); 7503 7504 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7505 return I == RetI ? RedCost : 0; 7506 } 7507 } 7508 7509 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7510 } 7511 7512 InstructionCost 7513 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7514 ElementCount VF) { 7515 // Calculate scalar cost only. Vectorization cost should be ready at this 7516 // moment. 
7517 if (VF.isScalar()) { 7518 Type *ValTy = getLoadStoreType(I); 7519 const Align Alignment = getLoadStoreAlignment(I); 7520 unsigned AS = getLoadStoreAddressSpace(I); 7521 7522 return TTI.getAddressComputationCost(ValTy) + 7523 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7524 TTI::TCK_RecipThroughput, I); 7525 } 7526 return getWideningCost(I, VF); 7527 } 7528 7529 LoopVectorizationCostModel::VectorizationCostTy 7530 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7531 ElementCount VF) { 7532 // If we know that this instruction will remain uniform, check the cost of 7533 // the scalar version. 7534 if (isUniformAfterVectorization(I, VF)) 7535 VF = ElementCount::getFixed(1); 7536 7537 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7538 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7539 7540 // Forced scalars do not have any scalarization overhead. 7541 auto ForcedScalar = ForcedScalars.find(VF); 7542 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7543 auto InstSet = ForcedScalar->second; 7544 if (InstSet.count(I)) 7545 return VectorizationCostTy( 7546 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7547 VF.getKnownMinValue()), 7548 false); 7549 } 7550 7551 Type *VectorTy; 7552 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7553 7554 bool TypeNotScalarized = false; 7555 if (VF.isVector() && VectorTy->isVectorTy()) { 7556 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 7557 if (NumParts) 7558 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 7559 else 7560 C = InstructionCost::getInvalid(); 7561 } 7562 return VectorizationCostTy(C, TypeNotScalarized); 7563 } 7564 7565 InstructionCost 7566 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7567 ElementCount VF) const { 7568 7569 // There is no mechanism yet to create a scalable scalarization loop, 7570 // so this is currently Invalid. 7571 if (VF.isScalable()) 7572 return InstructionCost::getInvalid(); 7573 7574 if (VF.isScalar()) 7575 return 0; 7576 7577 InstructionCost Cost = 0; 7578 Type *RetTy = ToVectorTy(I->getType(), VF); 7579 if (!RetTy->isVoidTy() && 7580 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7581 Cost += TTI.getScalarizationOverhead( 7582 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7583 false); 7584 7585 // Some targets keep addresses scalar. 7586 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7587 return Cost; 7588 7589 // Some targets support efficient element stores. 7590 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7591 return Cost; 7592 7593 // Collect operands to consider. 7594 CallInst *CI = dyn_cast<CallInst>(I); 7595 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 7596 7597 // Skip operands that do not require extraction/scalarization and do not incur 7598 // any overhead. 7599 SmallVector<Type *> Tys; 7600 for (auto *V : filterExtractingOperands(Ops, VF)) 7601 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7602 return Cost + TTI.getOperandsScalarizationOverhead( 7603 filterExtractingOperands(Ops, VF), Tys); 7604 } 7605 7606 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7607 if (VF.isScalar()) 7608 return; 7609 NumPredStores = 0; 7610 for (BasicBlock *BB : TheLoop->blocks()) { 7611 // For each instruction in the old loop. 
7612 for (Instruction &I : *BB) { 7613 Value *Ptr = getLoadStorePointerOperand(&I); 7614 if (!Ptr) 7615 continue; 7616 7617 // TODO: We should generate better code and update the cost model for 7618 // predicated uniform stores. Today they are treated as any other 7619 // predicated store (see added test cases in 7620 // invariant-store-vectorization.ll). 7621 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7622 NumPredStores++; 7623 7624 if (Legal->isUniformMemOp(I)) { 7625 // TODO: Avoid replicating loads and stores instead of 7626 // relying on instcombine to remove them. 7627 // Load: Scalar load + broadcast 7628 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7629 InstructionCost Cost; 7630 if (isa<StoreInst>(&I) && VF.isScalable() && 7631 isLegalGatherOrScatter(&I)) { 7632 Cost = getGatherScatterCost(&I, VF); 7633 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7634 } else { 7635 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7636 "Cannot yet scalarize uniform stores"); 7637 Cost = getUniformMemOpCost(&I, VF); 7638 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7639 } 7640 continue; 7641 } 7642 7643 // We assume that widening is the best solution when possible. 7644 if (memoryInstructionCanBeWidened(&I, VF)) { 7645 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7646 int ConsecutiveStride = Legal->isConsecutivePtr( 7647 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 7648 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7649 "Expected consecutive stride."); 7650 InstWidening Decision = 7651 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7652 setWideningDecision(&I, VF, Decision, Cost); 7653 continue; 7654 } 7655 7656 // Choose between Interleaving, Gather/Scatter or Scalarization. 7657 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7658 unsigned NumAccesses = 1; 7659 if (isAccessInterleaved(&I)) { 7660 auto Group = getInterleavedAccessGroup(&I); 7661 assert(Group && "Fail to get an interleaved access group."); 7662 7663 // Make one decision for the whole group. 7664 if (getWideningDecision(&I, VF) != CM_Unknown) 7665 continue; 7666 7667 NumAccesses = Group->getNumMembers(); 7668 if (interleavedAccessCanBeWidened(&I, VF)) 7669 InterleaveCost = getInterleaveGroupCost(&I, VF); 7670 } 7671 7672 InstructionCost GatherScatterCost = 7673 isLegalGatherOrScatter(&I) 7674 ? getGatherScatterCost(&I, VF) * NumAccesses 7675 : InstructionCost::getInvalid(); 7676 7677 InstructionCost ScalarizationCost = 7678 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7679 7680 // Choose better solution for the current VF, 7681 // write down this decision and use it during vectorization. 7682 InstructionCost Cost; 7683 InstWidening Decision; 7684 if (InterleaveCost <= GatherScatterCost && 7685 InterleaveCost < ScalarizationCost) { 7686 Decision = CM_Interleave; 7687 Cost = InterleaveCost; 7688 } else if (GatherScatterCost < ScalarizationCost) { 7689 Decision = CM_GatherScatter; 7690 Cost = GatherScatterCost; 7691 } else { 7692 Decision = CM_Scalarize; 7693 Cost = ScalarizationCost; 7694 } 7695 // If the instructions belongs to an interleave group, the whole group 7696 // receives the same decision. The whole group receives the cost, but 7697 // the cost will actually be assigned to one instruction. 
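// For example (illustrative): for a factor-2 group of loads {A[2*i],
// A[2*i+1]}, a single decision is recorded for the whole group below, with
// the combined cost attached to one member on the group's behalf.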
7698 if (auto Group = getInterleavedAccessGroup(&I)) 7699 setWideningDecision(Group, VF, Decision, Cost); 7700 else 7701 setWideningDecision(&I, VF, Decision, Cost); 7702 } 7703 } 7704 7705 // Make sure that any load of address and any other address computation 7706 // remains scalar unless there is gather/scatter support. This avoids 7707 // inevitable extracts into address registers, and also has the benefit of 7708 // activating LSR more, since that pass can't optimize vectorized 7709 // addresses. 7710 if (TTI.prefersVectorizedAddressing()) 7711 return; 7712 7713 // Start with all scalar pointer uses. 7714 SmallPtrSet<Instruction *, 8> AddrDefs; 7715 for (BasicBlock *BB : TheLoop->blocks()) 7716 for (Instruction &I : *BB) { 7717 Instruction *PtrDef = 7718 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7719 if (PtrDef && TheLoop->contains(PtrDef) && 7720 getWideningDecision(&I, VF) != CM_GatherScatter) 7721 AddrDefs.insert(PtrDef); 7722 } 7723 7724 // Add all instructions used to generate the addresses. 7725 SmallVector<Instruction *, 4> Worklist; 7726 append_range(Worklist, AddrDefs); 7727 while (!Worklist.empty()) { 7728 Instruction *I = Worklist.pop_back_val(); 7729 for (auto &Op : I->operands()) 7730 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7731 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7732 AddrDefs.insert(InstOp).second) 7733 Worklist.push_back(InstOp); 7734 } 7735 7736 for (auto *I : AddrDefs) { 7737 if (isa<LoadInst>(I)) { 7738 // Setting the desired widening decision should ideally be handled in 7739 // by cost functions, but since this involves the task of finding out 7740 // if the loaded register is involved in an address computation, it is 7741 // instead changed here when we know this is the case. 7742 InstWidening Decision = getWideningDecision(I, VF); 7743 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7744 // Scalarize a widened load of address. 7745 setWideningDecision( 7746 I, VF, CM_Scalarize, 7747 (VF.getKnownMinValue() * 7748 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7749 else if (auto Group = getInterleavedAccessGroup(I)) { 7750 // Scalarize an interleave group of address loads. 7751 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7752 if (Instruction *Member = Group->getMember(I)) 7753 setWideningDecision( 7754 Member, VF, CM_Scalarize, 7755 (VF.getKnownMinValue() * 7756 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7757 } 7758 } 7759 } else 7760 // Make sure I gets scalarized and a cost estimate without 7761 // scalarization overhead. 
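// (Instructions forced scalar here are later costed in getInstructionCost as
// VF scalar copies, i.e. the scalar cost multiplied by VF, with no
// insert/extract scalarization overhead.)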
7762 ForcedScalars[VF].insert(I); 7763 } 7764 } 7765 7766 InstructionCost 7767 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7768 Type *&VectorTy) { 7769 Type *RetTy = I->getType(); 7770 if (canTruncateToMinimalBitwidth(I, VF)) 7771 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7772 auto SE = PSE.getSE(); 7773 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7774 7775 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7776 ElementCount VF) -> bool { 7777 if (VF.isScalar()) 7778 return true; 7779 7780 auto Scalarized = InstsToScalarize.find(VF); 7781 assert(Scalarized != InstsToScalarize.end() && 7782 "VF not yet analyzed for scalarization profitability"); 7783 return !Scalarized->second.count(I) && 7784 llvm::all_of(I->users(), [&](User *U) { 7785 auto *UI = cast<Instruction>(U); 7786 return !Scalarized->second.count(UI); 7787 }); 7788 }; 7789 (void) hasSingleCopyAfterVectorization; 7790 7791 if (isScalarAfterVectorization(I, VF)) { 7792 // With the exception of GEPs and PHIs, after scalarization there should 7793 // only be one copy of the instruction generated in the loop. This is 7794 // because the VF is either 1, or any instructions that need scalarizing 7795 // have already been dealt with by the the time we get here. As a result, 7796 // it means we don't have to multiply the instruction cost by VF. 7797 assert(I->getOpcode() == Instruction::GetElementPtr || 7798 I->getOpcode() == Instruction::PHI || 7799 (I->getOpcode() == Instruction::BitCast && 7800 I->getType()->isPointerTy()) || 7801 hasSingleCopyAfterVectorization(I, VF)); 7802 VectorTy = RetTy; 7803 } else 7804 VectorTy = ToVectorTy(RetTy, VF); 7805 7806 // TODO: We need to estimate the cost of intrinsic calls. 7807 switch (I->getOpcode()) { 7808 case Instruction::GetElementPtr: 7809 // We mark this instruction as zero-cost because the cost of GEPs in 7810 // vectorized code depends on whether the corresponding memory instruction 7811 // is scalarized or not. Therefore, we handle GEPs with the memory 7812 // instruction cost. 7813 return 0; 7814 case Instruction::Br: { 7815 // In cases of scalarized and predicated instructions, there will be VF 7816 // predicated blocks in the vectorized loop. Each branch around these 7817 // blocks requires also an extract of its vector compare i1 element. 7818 bool ScalarPredicatedBB = false; 7819 BranchInst *BI = cast<BranchInst>(I); 7820 if (VF.isVector() && BI->isConditional() && 7821 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7822 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7823 ScalarPredicatedBB = true; 7824 7825 if (ScalarPredicatedBB) { 7826 // Not possible to scalarize scalable vector with predicated instructions. 7827 if (VF.isScalable()) 7828 return InstructionCost::getInvalid(); 7829 // Return cost for branches around scalarized and predicated blocks. 7830 auto *Vec_i1Ty = 7831 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7832 return ( 7833 TTI.getScalarizationOverhead( 7834 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7835 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7836 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7837 // The back-edge branch will remain, as will all scalar branches. 7838 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7839 else 7840 // This branch will be eliminated by if-conversion. 
7841 return 0; 7842 // Note: We currently assume zero cost for an unconditional branch inside 7843 // a predicated block since it will become a fall-through, although we 7844 // may decide in the future to call TTI for all branches. 7845 } 7846 case Instruction::PHI: { 7847 auto *Phi = cast<PHINode>(I); 7848 7849 // First-order recurrences are replaced by vector shuffles inside the loop. 7850 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7851 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7852 return TTI.getShuffleCost( 7853 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7854 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7855 7856 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7857 // converted into select instructions. We require N - 1 selects per phi 7858 // node, where N is the number of incoming values. 7859 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7860 return (Phi->getNumIncomingValues() - 1) * 7861 TTI.getCmpSelInstrCost( 7862 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7863 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7864 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7865 7866 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7867 } 7868 case Instruction::UDiv: 7869 case Instruction::SDiv: 7870 case Instruction::URem: 7871 case Instruction::SRem: 7872 // If we have a predicated instruction, it may not be executed for each 7873 // vector lane. Get the scalarization cost and scale this amount by the 7874 // probability of executing the predicated block. If the instruction is not 7875 // predicated, we fall through to the next case. 7876 if (VF.isVector() && isScalarWithPredication(I)) { 7877 InstructionCost Cost = 0; 7878 7879 // These instructions have a non-void type, so account for the phi nodes 7880 // that we will create. This cost is likely to be zero. The phi node 7881 // cost, if any, should be scaled by the block probability because it 7882 // models a copy at the end of each predicated block. 7883 Cost += VF.getKnownMinValue() * 7884 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7885 7886 // The cost of the non-predicated instruction. 7887 Cost += VF.getKnownMinValue() * 7888 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7889 7890 // The cost of insertelement and extractelement instructions needed for 7891 // scalarization. 7892 Cost += getScalarizationOverhead(I, VF); 7893 7894 // Scale the cost by the probability of executing the predicated blocks. 7895 // This assumes the predicated block for each vector lane is equally 7896 // likely. 7897 return Cost / getReciprocalPredBlockProb(); 7898 } 7899 LLVM_FALLTHROUGH; 7900 case Instruction::Add: 7901 case Instruction::FAdd: 7902 case Instruction::Sub: 7903 case Instruction::FSub: 7904 case Instruction::Mul: 7905 case Instruction::FMul: 7906 case Instruction::FDiv: 7907 case Instruction::FRem: 7908 case Instruction::Shl: 7909 case Instruction::LShr: 7910 case Instruction::AShr: 7911 case Instruction::And: 7912 case Instruction::Or: 7913 case Instruction::Xor: { 7914 // Since we will replace the stride by 1 the multiplication should go away. 
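// For example (illustrative): if the loop was versioned on a symbolic stride
// %s being 1, an index expression 'i * %s' simplifies to 'i' in the
// versioned loop, so the multiply below is costed as free.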
7915 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7916 return 0; 7917 7918 // Detect reduction patterns 7919 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7920 return *RedCost; 7921 7922 // Certain instructions can be cheaper to vectorize if they have a constant 7923 // second vector operand. One example of this are shifts on x86. 7924 Value *Op2 = I->getOperand(1); 7925 TargetTransformInfo::OperandValueProperties Op2VP; 7926 TargetTransformInfo::OperandValueKind Op2VK = 7927 TTI.getOperandInfo(Op2, Op2VP); 7928 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7929 Op2VK = TargetTransformInfo::OK_UniformValue; 7930 7931 SmallVector<const Value *, 4> Operands(I->operand_values()); 7932 return TTI.getArithmeticInstrCost( 7933 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7934 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7935 } 7936 case Instruction::FNeg: { 7937 return TTI.getArithmeticInstrCost( 7938 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7939 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7940 TargetTransformInfo::OP_None, I->getOperand(0), I); 7941 } 7942 case Instruction::Select: { 7943 SelectInst *SI = cast<SelectInst>(I); 7944 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7945 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7946 7947 const Value *Op0, *Op1; 7948 using namespace llvm::PatternMatch; 7949 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7950 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7951 // select x, y, false --> x & y 7952 // select x, true, y --> x | y 7953 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7954 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7955 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7956 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7957 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7958 Op1->getType()->getScalarSizeInBits() == 1); 7959 7960 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7961 return TTI.getArithmeticInstrCost( 7962 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7963 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7964 } 7965 7966 Type *CondTy = SI->getCondition()->getType(); 7967 if (!ScalarCond) 7968 CondTy = VectorType::get(CondTy, VF); 7969 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7970 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7971 } 7972 case Instruction::ICmp: 7973 case Instruction::FCmp: { 7974 Type *ValTy = I->getOperand(0)->getType(); 7975 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7976 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7977 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7978 VectorTy = ToVectorTy(ValTy, VF); 7979 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7980 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7981 } 7982 case Instruction::Store: 7983 case Instruction::Load: { 7984 ElementCount Width = VF; 7985 if (Width.isVector()) { 7986 InstWidening Decision = getWideningDecision(I, Width); 7987 assert(Decision != CM_Unknown && 7988 "CM decision should be taken at this point"); 7989 if (Decision == CM_Scalarize) 7990 Width = ElementCount::getFixed(1); 7991 } 7992 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7993 return getMemoryInstructionCost(I, VF); 7994 } 7995 case Instruction::BitCast: 7996 if (I->getType()->isPointerTy()) 7997 return 0; 7998 LLVM_FALLTHROUGH; 7999 case Instruction::ZExt: 8000 case Instruction::SExt: 8001 case Instruction::FPToUI: 8002 case Instruction::FPToSI: 8003 case Instruction::FPExt: 8004 case Instruction::PtrToInt: 8005 case Instruction::IntToPtr: 8006 case Instruction::SIToFP: 8007 case Instruction::UIToFP: 8008 case Instruction::Trunc: 8009 case Instruction::FPTrunc: { 8010 // Computes the CastContextHint from a Load/Store instruction. 8011 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 8012 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8013 "Expected a load or a store!"); 8014 8015 if (VF.isScalar() || !TheLoop->contains(I)) 8016 return TTI::CastContextHint::Normal; 8017 8018 switch (getWideningDecision(I, VF)) { 8019 case LoopVectorizationCostModel::CM_GatherScatter: 8020 return TTI::CastContextHint::GatherScatter; 8021 case LoopVectorizationCostModel::CM_Interleave: 8022 return TTI::CastContextHint::Interleave; 8023 case LoopVectorizationCostModel::CM_Scalarize: 8024 case LoopVectorizationCostModel::CM_Widen: 8025 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 8026 : TTI::CastContextHint::Normal; 8027 case LoopVectorizationCostModel::CM_Widen_Reverse: 8028 return TTI::CastContextHint::Reversed; 8029 case LoopVectorizationCostModel::CM_Unknown: 8030 llvm_unreachable("Instr did not go through cost modelling?"); 8031 } 8032 8033 llvm_unreachable("Unhandled case!"); 8034 }; 8035 8036 unsigned Opcode = I->getOpcode(); 8037 TTI::CastContextHint CCH = TTI::CastContextHint::None; 8038 // For Trunc, the context is the only user, which must be a StoreInst. 8039 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 8040 if (I->hasOneUse()) 8041 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 8042 CCH = ComputeCCH(Store); 8043 } 8044 // For Z/Sext, the context is the operand, which must be a LoadInst. 
8045 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 8046 Opcode == Instruction::FPExt) { 8047 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 8048 CCH = ComputeCCH(Load); 8049 } 8050 8051 // We optimize the truncation of induction variables having constant 8052 // integer steps. The cost of these truncations is the same as the scalar 8053 // operation. 8054 if (isOptimizableIVTruncate(I, VF)) { 8055 auto *Trunc = cast<TruncInst>(I); 8056 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 8057 Trunc->getSrcTy(), CCH, CostKind, Trunc); 8058 } 8059 8060 // Detect reduction patterns 8061 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 8062 return *RedCost; 8063 8064 Type *SrcScalarTy = I->getOperand(0)->getType(); 8065 Type *SrcVecTy = 8066 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 8067 if (canTruncateToMinimalBitwidth(I, VF)) { 8068 // This cast is going to be shrunk. This may remove the cast or it might 8069 // turn it into slightly different cast. For example, if MinBW == 16, 8070 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 8071 // 8072 // Calculate the modified src and dest types. 8073 Type *MinVecTy = VectorTy; 8074 if (Opcode == Instruction::Trunc) { 8075 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 8076 VectorTy = 8077 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 8078 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 8079 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 8080 VectorTy = 8081 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 8082 } 8083 } 8084 8085 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 8086 } 8087 case Instruction::Call: { 8088 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 8089 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 8090 return *RedCost; 8091 bool NeedToScalarize; 8092 CallInst *CI = cast<CallInst>(I); 8093 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 8094 if (getVectorIntrinsicIDForCall(CI, TLI)) { 8095 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 8096 return std::min(CallCost, IntrinsicCost); 8097 } 8098 return CallCost; 8099 } 8100 case Instruction::ExtractValue: 8101 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 8102 case Instruction::Alloca: 8103 // We cannot easily widen alloca to a scalable alloca, as 8104 // the result would need to be a vector of pointers. 8105 if (VF.isScalable()) 8106 return InstructionCost::getInvalid(); 8107 LLVM_FALLTHROUGH; 8108 default: 8109 // This opcode is unknown. Assume that it is the same as 'mul'. 8110 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 8111 } // end of switch. 
8112 } 8113 8114 char LoopVectorize::ID = 0; 8115 8116 static const char lv_name[] = "Loop Vectorization"; 8117 8118 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 8119 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 8120 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 8121 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 8122 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 8123 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 8124 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 8125 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 8126 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 8127 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 8128 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 8129 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 8130 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 8131 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 8132 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 8133 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 8134 8135 namespace llvm { 8136 8137 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 8138 8139 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 8140 bool VectorizeOnlyWhenForced) { 8141 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 8142 } 8143 8144 } // end namespace llvm 8145 8146 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 8147 // Check if the pointer operand of a load or store instruction is 8148 // consecutive. 8149 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 8150 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 8151 return false; 8152 } 8153 8154 void LoopVectorizationCostModel::collectValuesToIgnore() { 8155 // Ignore ephemeral values. 8156 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 8157 8158 // Ignore type-promoting instructions we identified during reduction 8159 // detection. 8160 for (auto &Reduction : Legal->getReductionVars()) { 8161 RecurrenceDescriptor &RedDes = Reduction.second; 8162 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 8163 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 8164 } 8165 // Ignore type-casting instructions we identified during induction 8166 // detection. 8167 for (auto &Induction : Legal->getInductionVars()) { 8168 InductionDescriptor &IndDes = Induction.second; 8169 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8170 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 8171 } 8172 } 8173 8174 void LoopVectorizationCostModel::collectInLoopReductions() { 8175 for (auto &Reduction : Legal->getReductionVars()) { 8176 PHINode *Phi = Reduction.first; 8177 RecurrenceDescriptor &RdxDesc = Reduction.second; 8178 8179 // We don't collect reductions that are type promoted (yet). 8180 if (RdxDesc.getRecurrenceType() != Phi->getType()) 8181 continue; 8182 8183 // If the target would prefer this reduction to happen "in-loop", then we 8184 // want to record it as such. 8185 unsigned Opcode = RdxDesc.getOpcode(); 8186 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 8187 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 8188 TargetTransformInfo::ReductionFlags())) 8189 continue; 8190 8191 // Check that we can correctly put the reductions into the loop, by 8192 // finding the chain of operations that leads from the phi to the loop 8193 // exit value. 
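    // For example, for an integer 'sum += a[i]' reduction the chain is just
    // the add feeding back into the phi; if no such chain can be found, the
    // reduction is left out-of-loop.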
8194 SmallVector<Instruction *, 4> ReductionOperations = 8195 RdxDesc.getReductionOpChain(Phi, TheLoop); 8196 bool InLoop = !ReductionOperations.empty(); 8197 if (InLoop) { 8198 InLoopReductionChains[Phi] = ReductionOperations; 8199 // Add the elements to InLoopReductionImmediateChains for cost modelling. 8200 Instruction *LastChain = Phi; 8201 for (auto *I : ReductionOperations) { 8202 InLoopReductionImmediateChains[I] = LastChain; 8203 LastChain = I; 8204 } 8205 } 8206 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 8207 << " reduction for phi: " << *Phi << "\n"); 8208 } 8209 } 8210 8211 // TODO: we could return a pair of values that specify the max VF and 8212 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 8213 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment 8214 // doesn't have a cost model that can choose which plan to execute if 8215 // more than one is generated. 8216 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 8217 LoopVectorizationCostModel &CM) { 8218 unsigned WidestType; 8219 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 8220 return WidestVectorRegBits / WidestType; 8221 } 8222 8223 VectorizationFactor 8224 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 8225 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 8226 ElementCount VF = UserVF; 8227 // Outer loop handling: outer loops may require CFG and instruction level 8228 // transformations before even evaluating whether vectorization is profitable. 8229 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8230 // the vectorization pipeline. 8231 if (!OrigLoop->isInnermost()) { 8232 // If the user doesn't provide a vectorization factor, determine a 8233 // reasonable one. 8234 if (UserVF.isZero()) { 8235 VF = ElementCount::getFixed(determineVPlanVF( 8236 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 8237 .getFixedSize(), 8238 CM)); 8239 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 8240 8241 // Make sure we have a VF > 1 for stress testing. 8242 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 8243 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 8244 << "overriding computed VF.\n"); 8245 VF = ElementCount::getFixed(4); 8246 } 8247 } 8248 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 8249 assert(isPowerOf2_32(VF.getKnownMinValue()) && 8250 "VF needs to be a power of two"); 8251 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 8252 << "VF " << VF << " to build VPlans.\n"); 8253 buildVPlans(VF, VF); 8254 8255 // For VPlan build stress testing, we bail out after VPlan construction. 8256 if (VPlanBuildStressTest) 8257 return VectorizationFactor::Disabled(); 8258 8259 return {VF, 0 /*Cost*/}; 8260 } 8261 8262 LLVM_DEBUG( 8263 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 8264 "VPlan-native path.\n"); 8265 return VectorizationFactor::Disabled(); 8266 } 8267 8268 Optional<VectorizationFactor> 8269 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 8270 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8271 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 8272 if (!MaxFactors) // Cases that should not be vectorized nor interleaved. 8273 return None; 8274 8275 // Invalidate interleave groups if all blocks of the loop will be predicated
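  // (e.g. when folding the tail by masking) and the target does not support
  // masked interleaved accesses, as those groups could no longer be emitted
  // without masking.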
8276 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 8277 !useMaskedInterleavedAccesses(*TTI)) { 8278 LLVM_DEBUG( 8279 dbgs() 8280 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 8281 "which requires masked-interleaved support.\n"); 8282 if (CM.InterleaveInfo.invalidateGroups()) 8283 // Invalidating interleave groups also requires invalidating all decisions 8284 // based on them, which includes widening decisions and uniform and scalar 8285 // values. 8286 CM.invalidateCostModelingDecisions(); 8287 } 8288 8289 ElementCount MaxUserVF = 8290 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 8291 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 8292 if (!UserVF.isZero() && UserVFIsLegal) { 8293 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 8294 "VF needs to be a power of two"); 8295 // Collect the instructions (and their associated costs) that will be more 8296 // profitable to scalarize. 8297 if (CM.selectUserVectorizationFactor(UserVF)) { 8298 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 8299 CM.collectInLoopReductions(); 8300 buildVPlansWithVPRecipes(UserVF, UserVF); 8301 LLVM_DEBUG(printPlans(dbgs())); 8302 return {{UserVF, 0}}; 8303 } else 8304 reportVectorizationInfo("UserVF ignored because of invalid costs.", 8305 "InvalidCost", ORE, OrigLoop); 8306 } 8307 8308 // Populate the set of Vectorization Factor Candidates. 8309 ElementCountSet VFCandidates; 8310 for (auto VF = ElementCount::getFixed(1); 8311 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 8312 VFCandidates.insert(VF); 8313 for (auto VF = ElementCount::getScalable(1); 8314 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 8315 VFCandidates.insert(VF); 8316 8317 for (const auto &VF : VFCandidates) { 8318 // Collect Uniform and Scalar instructions after vectorization with VF. 8319 CM.collectUniformsAndScalars(VF); 8320 8321 // Collect the instructions (and their associated costs) that will be more 8322 // profitable to scalarize. 8323 if (VF.isVector()) 8324 CM.collectInstsToScalarize(VF); 8325 } 8326 8327 CM.collectInLoopReductions(); 8328 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 8329 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 8330 8331 LLVM_DEBUG(printPlans(dbgs())); 8332 if (!MaxFactors.hasVector()) 8333 return VectorizationFactor::Disabled(); 8334 8335 // Select the optimal vectorization factor. 8336 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 8337 8338 // Check if it is profitable to vectorize with runtime checks. 
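  // A large number of runtime pointer checks can make the runtime overhead
  // outweigh the benefit of vectorizing; past the thresholds checked below, a
  // remark is emitted and vectorization is abandoned.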
8339 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 8340 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 8341 bool PragmaThresholdReached = 8342 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 8343 bool ThresholdReached = 8344 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 8345 if ((ThresholdReached && !Hints.allowReordering()) || 8346 PragmaThresholdReached) { 8347 ORE->emit([&]() { 8348 return OptimizationRemarkAnalysisAliasing( 8349 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 8350 OrigLoop->getHeader()) 8351 << "loop not vectorized: cannot prove it is safe to reorder " 8352 "memory operations"; 8353 }); 8354 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 8355 Hints.emitRemarkWithHints(); 8356 return VectorizationFactor::Disabled(); 8357 } 8358 } 8359 return SelectedVF; 8360 } 8361 8362 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 8363 assert(count_if(VPlans, 8364 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 8365 1 && 8366 "Best VF does not have a single VPlan."); 8367 8368 for (const VPlanPtr &Plan : VPlans) { 8369 if (Plan->hasVF(VF)) 8370 return *Plan.get(); 8371 } 8372 llvm_unreachable("No plan found!"); 8373 } 8374 8375 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 8376 VPlan &BestVPlan, 8377 InnerLoopVectorizer &ILV, 8378 DominatorTree *DT) { 8379 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 8380 << '\n'); 8381 8382 // Perform the actual loop transformation. 8383 8384 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 8385 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 8386 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8387 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8388 State.CanonicalIV = ILV.Induction; 8389 ILV.collectPoisonGeneratingRecipes(State); 8390 8391 ILV.printDebugTracesAtStart(); 8392 8393 //===------------------------------------------------===// 8394 // 8395 // Notice: any optimization or new instruction that goes 8396 // into the code below should also be implemented in 8397 // the cost-model. 8398 // 8399 //===------------------------------------------------===// 8400 8401 // 2. Copy and widen instructions from the old loop into the new loop. 8402 BestVPlan.execute(&State); 8403 8404 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8405 // predication, updating analyses.
8406 ILV.fixVectorizedLoop(State); 8407 8408 ILV.printDebugTracesAtEnd(); 8409 } 8410 8411 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 8412 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 8413 for (const auto &Plan : VPlans) 8414 if (PrintVPlansInDotFormat) 8415 Plan->printDOT(O); 8416 else 8417 Plan->print(O); 8418 } 8419 #endif 8420 8421 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 8422 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 8423 8424 // We create new control-flow for the vectorized loop, so the original exit 8425 // conditions will be dead after vectorization if they are only used by the 8426 // terminator. 8427 SmallVector<BasicBlock*> ExitingBlocks; 8428 OrigLoop->getExitingBlocks(ExitingBlocks); 8429 for (auto *BB : ExitingBlocks) { 8430 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 8431 if (!Cmp || !Cmp->hasOneUse()) 8432 continue; 8433 8434 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8435 if (!DeadInstructions.insert(Cmp).second) 8436 continue; 8437 8438 // The operands of the icmp are often dead truncs, used by IndUpdate. 8439 // TODO: can recurse through operands in general 8440 for (Value *Op : Cmp->operands()) { 8441 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8442 DeadInstructions.insert(cast<Instruction>(Op)); 8443 } 8444 } 8445 8446 // We create new "steps" for induction variable updates to which the original 8447 // induction variables map. An original update instruction will be dead if 8448 // all its users except the induction variable are dead. 8449 auto *Latch = OrigLoop->getLoopLatch(); 8450 for (auto &Induction : Legal->getInductionVars()) { 8451 PHINode *Ind = Induction.first; 8452 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8453 8454 // If the tail is to be folded by masking, the primary induction variable, 8455 // if it exists, isn't dead: it will be used for masking. Don't kill it. 8456 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8457 continue; 8458 8459 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8460 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8461 })) 8462 DeadInstructions.insert(IndUpdate); 8463 8464 // We also record as "Dead" the type-casting instructions we had identified 8465 // during induction analysis. We don't need any handling for them in the 8466 // vectorized loop because we have proven that, under a proper runtime 8467 // test guarding the vectorized loop, the value of the phi, and the casted 8468 // value of the phi, are the same. The last instruction in this casting chain 8469 // will get its scalar/vector/widened def from the scalar/vector/widened def 8470 // of the respective phi node. Any other casts in the induction def-use chain 8471 // have no other uses outside the phi update chain, and will be ignored. 8472 InductionDescriptor &IndDes = Induction.second; 8473 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8474 DeadInstructions.insert(Casts.begin(), Casts.end()); 8475 } 8476 } 8477 8478 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 8479 8480 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8481 8482 Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx, 8483 Value *Step, 8484 Instruction::BinaryOps BinOp) { 8485 // When unrolling and the VF is 1, we only need to add a simple scalar.
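  // That is, for unroll part 'StartIdx' the result is simply
  // Val + StartIdx * Step (or the corresponding floating-point operation).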
8486 Type *Ty = Val->getType(); 8487 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8488 8489 if (Ty->isFloatingPointTy()) { 8490 // Floating-point operations inherit FMF via the builder's flags. 8491 Value *MulOp = Builder.CreateFMul(StartIdx, Step); 8492 return Builder.CreateBinOp(BinOp, Val, MulOp); 8493 } 8494 return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction"); 8495 } 8496 8497 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8498 SmallVector<Metadata *, 4> MDs; 8499 // Reserve first location for self reference to the LoopID metadata node. 8500 MDs.push_back(nullptr); 8501 bool IsUnrollMetadata = false; 8502 MDNode *LoopID = L->getLoopID(); 8503 if (LoopID) { 8504 // First find existing loop unrolling disable metadata. 8505 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8506 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8507 if (MD) { 8508 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8509 IsUnrollMetadata = 8510 S && S->getString().startswith("llvm.loop.unroll.disable"); 8511 } 8512 MDs.push_back(LoopID->getOperand(i)); 8513 } 8514 } 8515 8516 if (!IsUnrollMetadata) { 8517 // Add runtime unroll disable metadata. 8518 LLVMContext &Context = L->getHeader()->getContext(); 8519 SmallVector<Metadata *, 1> DisableOperands; 8520 DisableOperands.push_back( 8521 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8522 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8523 MDs.push_back(DisableNode); 8524 MDNode *NewLoopID = MDNode::get(Context, MDs); 8525 // Set operand 0 to refer to the loop id itself. 8526 NewLoopID->replaceOperandWith(0, NewLoopID); 8527 L->setLoopID(NewLoopID); 8528 } 8529 } 8530 8531 //===--------------------------------------------------------------------===// 8532 // EpilogueVectorizerMainLoop 8533 //===--------------------------------------------------------------------===// 8534 8535 /// This function is partially responsible for generating the control flow 8536 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8537 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8538 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8539 Loop *Lp = createVectorLoopSkeleton(""); 8540 8541 // Generate the code to check the minimum iteration count of the vector 8542 // epilogue (see below). 8543 EPI.EpilogueIterationCountCheck = 8544 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8545 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8546 8547 // Generate the code to check any assumptions that we've made for SCEV 8548 // expressions. 8549 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8550 8551 // Generate the code that checks at runtime if arrays overlap. We put the 8552 // checks into a separate block to make the more common case of few elements 8553 // faster. 8554 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8555 8556 // Generate the iteration count check for the main loop, *after* the check 8557 // for the epilogue loop, so that the path-length is shorter for the case 8558 // that goes directly through the vector epilogue. The longer-path length for 8559 // the main loop is compensated for, by the gain from vectorizing the larger 8560 // trip count. Note: the branch will get updated later on when we vectorize 8561 // the epilogue. 
8562 EPI.MainLoopIterationCountCheck = 8563 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8564 8565 // Generate the induction variable. 8566 OldInduction = Legal->getPrimaryInduction(); 8567 Type *IdxTy = Legal->getWidestInductionType(); 8568 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8569 8570 IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt()); 8571 Value *Step = getRuntimeVF(B, IdxTy, VF * UF); 8572 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8573 EPI.VectorTripCount = CountRoundDown; 8574 Induction = 8575 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8576 getDebugLocFromInstOrOperands(OldInduction)); 8577 8578 // Skip induction resume value creation here because they will be created in 8579 // the second pass. If we created them here, they wouldn't be used anyway, 8580 // because the vplan in the second pass still contains the inductions from the 8581 // original loop. 8582 8583 return completeLoopSkeleton(Lp, OrigLoopID); 8584 } 8585 8586 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8587 LLVM_DEBUG({ 8588 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8589 << "Main Loop VF:" << EPI.MainLoopVF 8590 << ", Main Loop UF:" << EPI.MainLoopUF 8591 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8592 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8593 }); 8594 } 8595 8596 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8597 DEBUG_WITH_TYPE(VerboseDebug, { 8598 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8599 }); 8600 } 8601 8602 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8603 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8604 assert(L && "Expected valid Loop."); 8605 assert(Bypass && "Expected valid bypass basic block."); 8606 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8607 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8608 Value *Count = getOrCreateTripCount(L); 8609 // Reuse existing vector loop preheader for TC checks. 8610 // Note that new preheader block is generated for vector loop. 8611 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8612 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8613 8614 // Generate code to check if the loop's trip count is less than VF * UF of the 8615 // main vector loop. 8616 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8617 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8618 8619 Value *CheckMinIters = Builder.CreateICmp( 8620 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 8621 "min.iters.check"); 8622 8623 if (!ForEpilogue) 8624 TCCheckBlock->setName("vector.main.loop.iter.check"); 8625 8626 // Create new preheader for vector loop. 8627 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8628 DT, LI, nullptr, "vector.ph"); 8629 8630 if (ForEpilogue) { 8631 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8632 DT->getNode(Bypass)->getIDom()) && 8633 "TC check is expected to dominate Bypass"); 8634 8635 // Update dominator for Bypass & LoopExit. 8636 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8637 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8638 // For loops with multiple exits, there's no edge from the middle block 8639 // to exit blocks (as the epilogue must run) and thus no need to update 8640 // the immediate dominator of the exit blocks. 
8641 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8642 8643 LoopBypassBlocks.push_back(TCCheckBlock); 8644 8645 // Save the trip count so we don't have to regenerate it in the 8646 // vec.epilog.iter.check. This is safe to do because the trip count 8647 // generated here dominates the vector epilog iter check. 8648 EPI.TripCount = Count; 8649 } 8650 8651 ReplaceInstWithInst( 8652 TCCheckBlock->getTerminator(), 8653 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8654 8655 return TCCheckBlock; 8656 } 8657 8658 //===--------------------------------------------------------------------===// 8659 // EpilogueVectorizerEpilogueLoop 8660 //===--------------------------------------------------------------------===// 8661 8662 /// This function is partially responsible for generating the control flow 8663 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8664 BasicBlock * 8665 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8666 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8667 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8668 8669 // Now, compare the remaining count and if there aren't enough iterations to 8670 // execute the vectorized epilogue skip to the scalar part. 8671 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8672 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8673 LoopVectorPreHeader = 8674 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8675 LI, nullptr, "vec.epilog.ph"); 8676 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8677 VecEpilogueIterationCountCheck); 8678 8679 // Adjust the control flow taking the state info from the main loop 8680 // vectorization into account. 8681 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8682 "expected this to be saved from the previous pass."); 8683 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8684 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8685 8686 DT->changeImmediateDominator(LoopVectorPreHeader, 8687 EPI.MainLoopIterationCountCheck); 8688 8689 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8690 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8691 8692 if (EPI.SCEVSafetyCheck) 8693 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8694 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8695 if (EPI.MemSafetyCheck) 8696 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8697 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8698 8699 DT->changeImmediateDominator( 8700 VecEpilogueIterationCountCheck, 8701 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8702 8703 DT->changeImmediateDominator(LoopScalarPreHeader, 8704 EPI.EpilogueIterationCountCheck); 8705 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8706 // If there is an epilogue which must run, there's no edge from the 8707 // middle block to exit blocks and thus no need to update the immediate 8708 // dominator of the exit blocks. 8709 DT->changeImmediateDominator(LoopExitBlock, 8710 EPI.EpilogueIterationCountCheck); 8711 8712 // Keep track of bypass blocks, as they feed start values to the induction 8713 // phis in the scalar loop preheader. 
8714 if (EPI.SCEVSafetyCheck) 8715 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8716 if (EPI.MemSafetyCheck) 8717 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8718 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8719 8720 // Generate a resume induction for the vector epilogue and put it in the 8721 // vector epilogue preheader. 8722 Type *IdxTy = Legal->getWidestInductionType(); 8723 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8724 LoopVectorPreHeader->getFirstNonPHI()); 8725 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8726 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8727 EPI.MainLoopIterationCountCheck); 8728 8729 // Generate the induction variable. 8730 OldInduction = Legal->getPrimaryInduction(); 8731 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8732 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8733 Value *StartIdx = EPResumeVal; 8734 Induction = 8735 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8736 getDebugLocFromInstOrOperands(OldInduction)); 8737 8738 // Generate induction resume values. These variables save the new starting 8739 // indexes for the scalar loop. They are used to test if there are any tail 8740 // iterations left once the vector loop has completed. 8741 // Note that when the vectorized epilogue is skipped due to the iteration 8742 // count check, then the resume value for the induction variable comes from 8743 // the trip count of the main vector loop, hence passing the AdditionalBypass 8744 // argument. 8745 createInductionResumeValues(Lp, CountRoundDown, 8746 {VecEpilogueIterationCountCheck, 8747 EPI.VectorTripCount} /* AdditionalBypass */); 8748 8749 AddRuntimeUnrollDisableMetaData(Lp); 8750 return completeLoopSkeleton(Lp, OrigLoopID); 8751 } 8752 8753 BasicBlock * 8754 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( 8755 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) { 8756 8757 assert(EPI.TripCount && 8758 "Expected trip count to have been saved in the first pass."); 8759 assert( 8760 (!isa<Instruction>(EPI.TripCount) || 8761 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && 8762 "saved trip count does not dominate insertion point."); 8763 Value *TC = EPI.TripCount; 8764 IRBuilder<> Builder(Insert->getTerminator()); 8765 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); 8766 8767 // Generate code to check if the loop's trip count is less than VF * UF of the 8768 // vector epilogue loop. 8769 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8770 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8771 8772 Value *CheckMinIters = 8773 Builder.CreateICmp(P, Count, 8774 createStepForVF(Builder, Count->getType(), 8775 EPI.EpilogueVF, EPI.EpilogueUF), 8776 "min.epilog.iters.check"); 8777 8778 ReplaceInstWithInst( 8779 Insert->getTerminator(), 8780 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8781 8782 LoopBypassBlocks.push_back(Insert); 8783 return Insert; 8784 } 8785 8786 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8787 LLVM_DEBUG({ 8788 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8789 << "Epilogue Loop VF:" << EPI.EpilogueVF 8790 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8791 }); 8792 } 8793 8794 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8795 DEBUG_WITH_TYPE(VerboseDebug, { 8796 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8797 }); 8798 } 8799 8800 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8801 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8802 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8803 bool PredicateAtRangeStart = Predicate(Range.Start); 8804 8805 for (ElementCount TmpVF = Range.Start * 2; 8806 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8807 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8808 Range.End = TmpVF; 8809 break; 8810 } 8811 8812 return PredicateAtRangeStart; 8813 } 8814 8815 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8816 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8817 /// of VF's starting at a given VF and extending it as much as possible. Each 8818 /// vectorization decision can potentially shorten this sub-range during 8819 /// buildVPlan(). 8820 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8821 ElementCount MaxVF) { 8822 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8823 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8824 VFRange SubRange = {VF, MaxVFPlusOne}; 8825 VPlans.push_back(buildVPlan(SubRange)); 8826 VF = SubRange.End; 8827 } 8828 } 8829 8830 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8831 VPlanPtr &Plan) { 8832 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8833 8834 // Look for cached value. 8835 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8836 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8837 if (ECEntryIt != EdgeMaskCache.end()) 8838 return ECEntryIt->second; 8839 8840 VPValue *SrcMask = createBlockInMask(Src, Plan); 8841 8842 // The terminator has to be a branch inst! 8843 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8844 assert(BI && "Unexpected terminator found"); 8845 8846 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8847 return EdgeMaskCache[Edge] = SrcMask; 8848 8849 // If source is an exiting block, we know the exit edge is dynamically dead 8850 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8851 // adding uses of an otherwise potentially dead instruction. 8852 if (OrigLoop->isLoopExiting(Src)) 8853 return EdgeMaskCache[Edge] = SrcMask; 8854 8855 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8856 assert(EdgeMask && "No Edge Mask found for condition"); 8857 8858 if (BI->getSuccessor(0) != Dst) 8859 EdgeMask = Builder.createNot(EdgeMask); 8860 8861 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 
8862 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8863 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8864 // The select version does not introduce new UB if SrcMask is false and 8865 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8866 VPValue *False = Plan->getOrAddVPValue( 8867 ConstantInt::getFalse(BI->getCondition()->getType())); 8868 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8869 } 8870 8871 return EdgeMaskCache[Edge] = EdgeMask; 8872 } 8873 8874 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8875 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8876 8877 // Look for cached value. 8878 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8879 if (BCEntryIt != BlockMaskCache.end()) 8880 return BCEntryIt->second; 8881 8882 // All-one mask is modelled as no-mask following the convention for masked 8883 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8884 VPValue *BlockMask = nullptr; 8885 8886 if (OrigLoop->getHeader() == BB) { 8887 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8888 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8889 8890 // Create the block in mask as the first non-phi instruction in the block. 8891 VPBuilder::InsertPointGuard Guard(Builder); 8892 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8893 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8894 8895 // Introduce the early-exit compare IV <= BTC to form header block mask. 8896 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8897 // Start by constructing the desired canonical IV. 8898 VPValue *IV = nullptr; 8899 if (Legal->getPrimaryInduction()) 8900 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8901 else { 8902 auto *IVRecipe = new VPWidenCanonicalIVRecipe(); 8903 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8904 IV = IVRecipe; 8905 } 8906 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8907 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8908 8909 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8910 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8911 // as a second argument, we only pass the IV here and extract the 8912 // tripcount from the transform state where codegen of the VP instructions 8913 // happen. 8914 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8915 } else { 8916 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8917 } 8918 return BlockMaskCache[BB] = BlockMask; 8919 } 8920 8921 // This is the block mask. We OR all incoming edges. 8922 for (auto *Predecessor : predecessors(BB)) { 8923 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8924 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8925 return BlockMaskCache[BB] = EdgeMask; 8926 8927 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8928 BlockMask = EdgeMask; 8929 continue; 8930 } 8931 8932 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8933 } 8934 8935 return BlockMaskCache[BB] = BlockMask; 8936 } 8937 8938 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8939 ArrayRef<VPValue *> Operands, 8940 VFRange &Range, 8941 VPlanPtr &Plan) { 8942 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8943 "Must be called with either a load or store"); 8944 8945 auto willWiden = [&](ElementCount VF) -> bool { 8946 if (VF.isScalar()) 8947 return false; 8948 LoopVectorizationCostModel::InstWidening Decision = 8949 CM.getWideningDecision(I, VF); 8950 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8951 "CM decision should be taken at this point."); 8952 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8953 return true; 8954 if (CM.isScalarAfterVectorization(I, VF) || 8955 CM.isProfitableToScalarize(I, VF)) 8956 return false; 8957 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8958 }; 8959 8960 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8961 return nullptr; 8962 8963 VPValue *Mask = nullptr; 8964 if (Legal->isMaskRequired(I)) 8965 Mask = createBlockInMask(I->getParent(), Plan); 8966 8967 // Determine if the pointer operand of the access is either consecutive or 8968 // reverse consecutive. 8969 LoopVectorizationCostModel::InstWidening Decision = 8970 CM.getWideningDecision(I, Range.Start); 8971 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8972 bool Consecutive = 8973 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8974 8975 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8976 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8977 Consecutive, Reverse); 8978 8979 StoreInst *Store = cast<StoreInst>(I); 8980 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8981 Mask, Consecutive, Reverse); 8982 } 8983 8984 VPWidenIntOrFpInductionRecipe * 8985 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8986 ArrayRef<VPValue *> Operands) const { 8987 // Check if this is an integer or fp induction. If so, build the recipe that 8988 // produces its scalar and vector values. 8989 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8990 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8991 II.getKind() == InductionDescriptor::IK_FpInduction) { 8992 assert(II.getStartValue() == 8993 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8994 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8995 return new VPWidenIntOrFpInductionRecipe( 8996 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8997 } 8998 8999 return nullptr; 9000 } 9001 9002 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 9003 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 9004 VPlan &Plan) const { 9005 // Optimize the special case where the source is a constant integer 9006 // induction variable. Notice that we can only optimize the 'trunc' case 9007 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 9008 // (c) other casts depend on pointer size. 9009 9010 // Determine whether \p K is a truncation based on an induction variable that 9011 // can be optimized. 
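  // e.g. a trunc of an induction variable with a constant integer step can be
  // generated directly as an induction in the narrower destination type.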
9012 auto isOptimizableIVTruncate = 9013 [&](Instruction *K) -> std::function<bool(ElementCount)> { 9014 return [=](ElementCount VF) -> bool { 9015 return CM.isOptimizableIVTruncate(K, VF); 9016 }; 9017 }; 9018 9019 if (LoopVectorizationPlanner::getDecisionAndClampRange( 9020 isOptimizableIVTruncate(I), Range)) { 9021 9022 InductionDescriptor II = 9023 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 9024 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 9025 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 9026 Start, nullptr, I); 9027 } 9028 return nullptr; 9029 } 9030 9031 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 9032 ArrayRef<VPValue *> Operands, 9033 VPlanPtr &Plan) { 9034 // If all incoming values are equal, the incoming VPValue can be used directly 9035 // instead of creating a new VPBlendRecipe. 9036 VPValue *FirstIncoming = Operands[0]; 9037 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 9038 return FirstIncoming == Inc; 9039 })) { 9040 return Operands[0]; 9041 } 9042 9043 // We know that all PHIs in non-header blocks are converted into selects, so 9044 // we don't have to worry about the insertion order and we can just use the 9045 // builder. At this point we generate the predication tree. There may be 9046 // duplications since this is a simple recursive scan, but future 9047 // optimizations will clean it up. 9048 SmallVector<VPValue *, 2> OperandsWithMask; 9049 unsigned NumIncoming = Phi->getNumIncomingValues(); 9050 9051 for (unsigned In = 0; In < NumIncoming; In++) { 9052 VPValue *EdgeMask = 9053 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 9054 assert((EdgeMask || NumIncoming == 1) && 9055 "Multiple predecessors with one having a full mask"); 9056 OperandsWithMask.push_back(Operands[In]); 9057 if (EdgeMask) 9058 OperandsWithMask.push_back(EdgeMask); 9059 } 9060 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 9061 } 9062 9063 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 9064 ArrayRef<VPValue *> Operands, 9065 VFRange &Range) const { 9066 9067 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 9068 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 9069 Range); 9070 9071 if (IsPredicated) 9072 return nullptr; 9073 9074 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 9075 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 9076 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 9077 ID == Intrinsic::pseudoprobe || 9078 ID == Intrinsic::experimental_noalias_scope_decl)) 9079 return nullptr; 9080 9081 auto willWiden = [&](ElementCount VF) -> bool { 9082 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 9083 // The following case may be scalarized depending on the VF. 9084 // The flag shows whether we use Intrinsic or a usual Call for vectorized 9085 // version of the instruction. 9086 // Is it beneficial to perform intrinsic call compared to lib call? 9087 bool NeedToScalarize = false; 9088 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 9089 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 9090 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 9091 return UseVectorIntrinsic || !NeedToScalarize; 9092 }; 9093 9094 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 9095 return nullptr; 9096 9097 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 9098 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 9099 } 9100 9101 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 9102 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 9103 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 9104 // Instruction should be widened, unless it is scalar after vectorization, 9105 // scalarization is profitable or it is predicated. 9106 auto WillScalarize = [this, I](ElementCount VF) -> bool { 9107 return CM.isScalarAfterVectorization(I, VF) || 9108 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 9109 }; 9110 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 9111 Range); 9112 } 9113 9114 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 9115 ArrayRef<VPValue *> Operands) const { 9116 auto IsVectorizableOpcode = [](unsigned Opcode) { 9117 switch (Opcode) { 9118 case Instruction::Add: 9119 case Instruction::And: 9120 case Instruction::AShr: 9121 case Instruction::BitCast: 9122 case Instruction::FAdd: 9123 case Instruction::FCmp: 9124 case Instruction::FDiv: 9125 case Instruction::FMul: 9126 case Instruction::FNeg: 9127 case Instruction::FPExt: 9128 case Instruction::FPToSI: 9129 case Instruction::FPToUI: 9130 case Instruction::FPTrunc: 9131 case Instruction::FRem: 9132 case Instruction::FSub: 9133 case Instruction::ICmp: 9134 case Instruction::IntToPtr: 9135 case Instruction::LShr: 9136 case Instruction::Mul: 9137 case Instruction::Or: 9138 case Instruction::PtrToInt: 9139 case Instruction::SDiv: 9140 case Instruction::Select: 9141 case Instruction::SExt: 9142 case Instruction::Shl: 9143 case Instruction::SIToFP: 9144 case Instruction::SRem: 9145 case Instruction::Sub: 9146 case Instruction::Trunc: 9147 case Instruction::UDiv: 9148 case Instruction::UIToFP: 9149 case Instruction::URem: 9150 case Instruction::Xor: 9151 case Instruction::ZExt: 9152 return true; 9153 } 9154 return false; 9155 }; 9156 9157 if (!IsVectorizableOpcode(I->getOpcode())) 9158 return nullptr; 9159 9160 // Success: widen this instruction. 9161 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 9162 } 9163 9164 void VPRecipeBuilder::fixHeaderPhis() { 9165 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 9166 for (VPWidenPHIRecipe *R : PhisToFix) { 9167 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 9168 VPRecipeBase *IncR = 9169 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 9170 R->addOperand(IncR->getVPSingleValue()); 9171 } 9172 } 9173 9174 VPBasicBlock *VPRecipeBuilder::handleReplication( 9175 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 9176 VPlanPtr &Plan) { 9177 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 9178 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 9179 Range); 9180 9181 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 9182 [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); 9183 9184 // Even if the instruction is not marked as uniform, there are certain 9185 // intrinsic calls that can be effectively treated as such, so we check for 9186 // them here. 
Conservatively, we only do this for scalable vectors, since 9187 // for fixed-width VFs we can always fall back on full scalarization. 9188 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 9189 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 9190 case Intrinsic::assume: 9191 case Intrinsic::lifetime_start: 9192 case Intrinsic::lifetime_end: 9193 // For scalable vectors, if one of the operands is variant then we still 9194 // want to mark it as uniform, which will generate one instruction for just 9195 // the first lane of the vector. We can't scalarize the call in the same 9196 // way as for fixed-width vectors because we don't know how many lanes 9197 // there are. 9198 // 9199 // The reasons for doing it this way for scalable vectors are: 9200 // 1. For the assume intrinsic, generating the instruction for the first 9201 // lane is still better than not generating any at all. For 9202 // example, the input may be a splat across all lanes. 9203 // 2. For the lifetime start/end intrinsics the pointer operand only 9204 // does anything useful when the input comes from a stack object, 9205 // which suggests it should always be uniform. For non-stack objects 9206 // the effect is to poison the object, which still allows us to 9207 // remove the call. 9208 IsUniform = true; 9209 break; 9210 default: 9211 break; 9212 } 9213 } 9214 9215 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 9216 IsUniform, IsPredicated); 9217 setRecipe(I, Recipe); 9218 Plan->addVPValue(I, Recipe); 9219 9220 // Find if I uses a predicated instruction. If so, it will use its scalar 9221 // value. Avoid hoisting the insert-element which packs the scalar value into 9222 // a vector value, as that happens iff all users use the vector value. 9223 for (VPValue *Op : Recipe->operands()) { 9224 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 9225 if (!PredR) 9226 continue; 9227 auto *RepR = 9228 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 9229 assert(RepR->isPredicated() && 9230 "expected Replicate recipe to be predicated"); 9231 RepR->setAlsoPack(false); 9232 } 9233 9234 // Finalize the recipe for Instr, first if it is not predicated. 9235 if (!IsPredicated) { 9236 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 9237 VPBB->appendRecipe(Recipe); 9238 return VPBB; 9239 } 9240 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 9241 assert(VPBB->getSuccessors().empty() && 9242 "VPBB has successors when handling predicated replication."); 9243 // Record predicated instructions for above packing optimizations. 9244 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 9245 VPBlockUtils::insertBlockAfter(Region, VPBB); 9246 auto *RegSucc = new VPBasicBlock(); 9247 VPBlockUtils::insertBlockAfter(RegSucc, Region); 9248 return RegSucc; 9249 } 9250 9251 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 9252 VPRecipeBase *PredRecipe, 9253 VPlanPtr &Plan) { 9254 // Instructions marked for predication are replicated and placed under an 9255 // if-then construct to prevent side-effects. 9256 9257 // Generate recipes to compute the block mask for this region. 9258 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 9259 9260 // Build the triangular if-then region.
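  // The region is shaped as follows: pred.<opcode>.entry branches on the mask
  // either into pred.<opcode>.if, which holds the replicated instruction, or
  // directly to pred.<opcode>.continue, which holds the optional phi merging
  // the predicated value; the ".if" block falls through to ".continue".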
9261 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 9262 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 9263 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 9264 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 9265 auto *PHIRecipe = Instr->getType()->isVoidTy() 9266 ? nullptr 9267 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 9268 if (PHIRecipe) { 9269 Plan->removeVPValueFor(Instr); 9270 Plan->addVPValue(Instr, PHIRecipe); 9271 } 9272 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 9273 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 9274 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 9275 9276 // Note: first set Entry as region entry and then connect successors starting 9277 // from it in order, to propagate the "parent" of each VPBasicBlock. 9278 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 9279 VPBlockUtils::connectBlocks(Pred, Exit); 9280 9281 return Region; 9282 } 9283 9284 VPRecipeOrVPValueTy 9285 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 9286 ArrayRef<VPValue *> Operands, 9287 VFRange &Range, VPlanPtr &Plan) { 9288 // First, check for specific widening recipes that deal with calls, memory 9289 // operations, inductions and Phi nodes. 9290 if (auto *CI = dyn_cast<CallInst>(Instr)) 9291 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 9292 9293 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 9294 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 9295 9296 VPRecipeBase *Recipe; 9297 if (auto Phi = dyn_cast<PHINode>(Instr)) { 9298 if (Phi->getParent() != OrigLoop->getHeader()) 9299 return tryToBlend(Phi, Operands, Plan); 9300 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 9301 return toVPRecipeResult(Recipe); 9302 9303 VPWidenPHIRecipe *PhiRecipe = nullptr; 9304 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 9305 VPValue *StartV = Operands[0]; 9306 if (Legal->isReductionVariable(Phi)) { 9307 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9308 assert(RdxDesc.getRecurrenceStartValue() == 9309 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 9310 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 9311 CM.isInLoopReduction(Phi), 9312 CM.useOrderedReductions(RdxDesc)); 9313 } else { 9314 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 9315 } 9316 9317 // Record the incoming value from the backedge, so we can add the incoming 9318 // value from the backedge after all recipes have been created. 9319 recordRecipeOf(cast<Instruction>( 9320 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 9321 PhisToFix.push_back(PhiRecipe); 9322 } else { 9323 // TODO: record start and backedge value for remaining pointer induction 9324 // phis. 
9325 assert(Phi->getType()->isPointerTy() && 9326 "only pointer phis should be handled here"); 9327 PhiRecipe = new VPWidenPHIRecipe(Phi); 9328 } 9329 9330 return toVPRecipeResult(PhiRecipe); 9331 } 9332 9333 if (isa<TruncInst>(Instr) && 9334 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 9335 Range, *Plan))) 9336 return toVPRecipeResult(Recipe); 9337 9338 if (!shouldWiden(Instr, Range)) 9339 return nullptr; 9340 9341 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 9342 return toVPRecipeResult(new VPWidenGEPRecipe( 9343 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 9344 9345 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 9346 bool InvariantCond = 9347 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 9348 return toVPRecipeResult(new VPWidenSelectRecipe( 9349 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 9350 } 9351 9352 return toVPRecipeResult(tryToWiden(Instr, Operands)); 9353 } 9354 9355 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 9356 ElementCount MaxVF) { 9357 assert(OrigLoop->isInnermost() && "Inner loop expected."); 9358 9359 // Collect instructions from the original loop that will become trivially dead 9360 // in the vectorized loop. We don't need to vectorize these instructions. For 9361 // example, original induction update instructions can become dead because we 9362 // separately emit induction "steps" when generating code for the new loop. 9363 // Similarly, we create a new latch condition when setting up the structure 9364 // of the new loop, so the old one can become dead. 9365 SmallPtrSet<Instruction *, 4> DeadInstructions; 9366 collectTriviallyDeadInstructions(DeadInstructions); 9367 9368 // Add assume instructions we need to drop to DeadInstructions, to prevent 9369 // them from being added to the VPlan. 9370 // TODO: We only need to drop assumes in blocks that get flattened. If the 9371 // control flow is preserved, we should keep them. 9372 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 9373 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 9374 9375 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 9376 // Dead instructions do not need sinking. Remove them from SinkAfter. 9377 for (Instruction *I : DeadInstructions) 9378 SinkAfter.erase(I); 9379 9380 // Cannot sink instructions after dead instructions (there won't be any 9381 // recipes for them). Instead, find the first non-dead previous instruction.
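  // i.e. walk backwards from the recorded sink target until a live
  // instruction is found and re-point the sink-after entry at it.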
9382 for (auto &P : Legal->getSinkAfter()) { 9383 Instruction *SinkTarget = P.second; 9384 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 9385 (void)FirstInst; 9386 while (DeadInstructions.contains(SinkTarget)) { 9387 assert( 9388 SinkTarget != FirstInst && 9389 "Must find a live instruction (at least the one feeding the " 9390 "first-order recurrence PHI) before reaching the beginning of the block"); 9391 SinkTarget = SinkTarget->getPrevNode(); 9392 assert(SinkTarget != P.first && 9393 "sink source equals target, no sinking required"); 9394 } 9395 P.second = SinkTarget; 9396 } 9397 9398 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 9399 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 9400 VFRange SubRange = {VF, MaxVFPlusOne}; 9401 VPlans.push_back( 9402 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 9403 VF = SubRange.End; 9404 } 9405 } 9406 9407 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 9408 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 9409 const MapVector<Instruction *, Instruction *> &SinkAfter) { 9410 9411 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 9412 9413 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 9414 9415 // --------------------------------------------------------------------------- 9416 // Pre-construction: record ingredients whose recipes we'll need to further 9417 // process after constructing the initial VPlan. 9418 // --------------------------------------------------------------------------- 9419 9420 // Mark instructions we'll need to sink later and their targets as 9421 // ingredients whose recipe we'll need to record. 9422 for (auto &Entry : SinkAfter) { 9423 RecipeBuilder.recordRecipeOf(Entry.first); 9424 RecipeBuilder.recordRecipeOf(Entry.second); 9425 } 9426 for (auto &Reduction : CM.getInLoopReductionChains()) { 9427 PHINode *Phi = Reduction.first; 9428 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind(); 9429 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9430 9431 RecipeBuilder.recordRecipeOf(Phi); 9432 for (auto &R : ReductionOperations) { 9433 RecipeBuilder.recordRecipeOf(R); 9434 // For min/max reductions, where we have a pair of icmp/select, we also 9435 // need to record the ICmp recipe, so it can be removed later. 9436 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9437 "Only min/max recurrences allowed for inloop reductions"); 9438 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 9439 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 9440 } 9441 } 9442 9443 // For each interleave group which is relevant for this (possibly trimmed) 9444 // Range, add it to the set of groups to be later applied to the VPlan and add 9445 // placeholders for its members' Recipes which we'll be replacing with a 9446 // single VPInterleaveRecipe.
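  // For example, the loads of a factor-3 group a[3*i], a[3*i+1] and a[3*i+2]
  // are later emitted as one wide load plus shuffles, modelled by a single
  // VPInterleaveRecipe anchored at the group's insertion position.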
9447 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 9448 auto applyIG = [IG, this](ElementCount VF) -> bool { 9449 return (VF.isVector() && // Query is illegal for VF == 1 9450 CM.getWideningDecision(IG->getInsertPos(), VF) == 9451 LoopVectorizationCostModel::CM_Interleave); 9452 }; 9453 if (!getDecisionAndClampRange(applyIG, Range)) 9454 continue; 9455 InterleaveGroups.insert(IG); 9456 for (unsigned i = 0; i < IG->getFactor(); i++) 9457 if (Instruction *Member = IG->getMember(i)) 9458 RecipeBuilder.recordRecipeOf(Member); 9459 } 9460 9461 // --------------------------------------------------------------------------- 9462 // Build initial VPlan: Scan the body of the loop in a topological order to 9463 // visit each basic block after having visited its predecessor basic blocks. 9464 // --------------------------------------------------------------------------- 9465 9466 auto Plan = std::make_unique<VPlan>(); 9467 9468 // Scan the body of the loop in a topological order to visit each basic block 9469 // after having visited its predecessor basic blocks. 9470 LoopBlocksDFS DFS(OrigLoop); 9471 DFS.perform(LI); 9472 9473 VPBasicBlock *VPBB = nullptr; 9474 VPBasicBlock *HeaderVPBB = nullptr; 9475 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 9476 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 9477 // Relevant instructions from basic block BB will be grouped into VPRecipe 9478 // ingredients and fill a new VPBasicBlock. 9479 unsigned VPBBsForBB = 0; 9480 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 9481 if (VPBB) 9482 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 9483 else { 9484 auto *TopRegion = new VPRegionBlock("vector loop"); 9485 TopRegion->setEntry(FirstVPBBForBB); 9486 Plan->setEntry(TopRegion); 9487 HeaderVPBB = FirstVPBBForBB; 9488 } 9489 VPBB = FirstVPBBForBB; 9490 Builder.setInsertPoint(VPBB); 9491 9492 // Introduce each ingredient into VPlan. 9493 // TODO: Model and preserve debug intrinsics in VPlan. 9494 for (Instruction &I : BB->instructionsWithoutDebug()) { 9495 Instruction *Instr = &I; 9496 9497 // First filter out irrelevant instructions, to ensure no recipes are 9498 // built for them. 9499 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9500 continue; 9501 9502 SmallVector<VPValue *, 4> Operands; 9503 auto *Phi = dyn_cast<PHINode>(Instr); 9504 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9505 Operands.push_back(Plan->getOrAddVPValue( 9506 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9507 } else { 9508 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9509 Operands = {OpRange.begin(), OpRange.end()}; 9510 } 9511 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9512 Instr, Operands, Range, Plan)) { 9513 // If Instr can be simplified to an existing VPValue, use it. 9514 if (RecipeOrValue.is<VPValue *>()) { 9515 auto *VPV = RecipeOrValue.get<VPValue *>(); 9516 Plan->addVPValue(Instr, VPV); 9517 // If the re-used value is a recipe, register the recipe for the 9518 // instruction, in case the recipe for Instr needs to be recorded. 9519 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9520 RecipeBuilder.setRecipe(Instr, R); 9521 continue; 9522 } 9523 // Otherwise, add the new recipe.
9524 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9525 for (auto *Def : Recipe->definedValues()) { 9526 auto *UV = Def->getUnderlyingValue(); 9527 Plan->addVPValue(UV, Def); 9528 } 9529 9530 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9531 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9532 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9533 // of the header block. That can happen for truncates of induction 9534 // variables. Those recipes are moved to the phi section of the header 9535 // block after applying SinkAfter, which relies on the original 9536 // position of the trunc. 9537 assert(isa<TruncInst>(Instr)); 9538 InductionsToMove.push_back( 9539 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9540 } 9541 RecipeBuilder.setRecipe(Instr, Recipe); 9542 VPBB->appendRecipe(Recipe); 9543 continue; 9544 } 9545 9546 // Otherwise, if all widening options failed, Instruction is to be 9547 // replicated. This may create a successor for VPBB. 9548 VPBasicBlock *NextVPBB = 9549 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9550 if (NextVPBB != VPBB) { 9551 VPBB = NextVPBB; 9552 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9553 : ""); 9554 } 9555 } 9556 } 9557 9558 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9559 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9560 "entry block must be set to a VPRegionBlock having a non-empty entry " 9561 "VPBasicBlock"); 9562 cast<VPRegionBlock>(Plan->getEntry())->setExit(VPBB); 9563 RecipeBuilder.fixHeaderPhis(); 9564 9565 // --------------------------------------------------------------------------- 9566 // Transform initial VPlan: Apply previously taken decisions, in order, to 9567 // bring the VPlan to its final state. 9568 // --------------------------------------------------------------------------- 9569 9570 // Apply Sink-After legal constraints. 9571 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9572 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9573 if (Region && Region->isReplicator()) { 9574 assert(Region->getNumSuccessors() == 1 && 9575 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9576 assert(R->getParent()->size() == 1 && 9577 "A recipe in an original replicator region must be the only " 9578 "recipe in its block"); 9579 return Region; 9580 } 9581 return nullptr; 9582 }; 9583 for (auto &Entry : SinkAfter) { 9584 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9585 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9586 9587 auto *TargetRegion = GetReplicateRegion(Target); 9588 auto *SinkRegion = GetReplicateRegion(Sink); 9589 if (!SinkRegion) { 9590 // If the sink source is not a replicate region, sink the recipe directly. 9591 if (TargetRegion) { 9592 // The target is in a replication region, make sure to move Sink to 9593 // the block after it, not into the replication region itself. 9594 VPBasicBlock *NextBlock = 9595 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9596 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9597 } else 9598 Sink->moveAfter(Target); 9599 continue; 9600 } 9601 9602 // The sink source is in a replicate region. Unhook the region from the CFG. 
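// Schematically, the rewiring below does (simplified sketch):
//   before:  SinkPred -> SinkRegion -> SinkSucc
//   after:   SinkPred -> SinkSucc
// and the unhooked SinkRegion is then re-inserted either right after the
// target's replicate region or after a block split made just behind Target.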
9603 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9604 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9605 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9606 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9607 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9608 9609 if (TargetRegion) { 9610 // The target recipe is also in a replicate region, move the sink region 9611 // after the target region. 9612 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9613 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9614 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9615 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9616 } else { 9617 // The sink source is in a replicate region, we need to move the whole 9618 // replicate region, which should only contain a single recipe in the 9619 // main block. 9620 auto *SplitBlock = 9621 Target->getParent()->splitAt(std::next(Target->getIterator())); 9622 9623 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9624 9625 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9626 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9627 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9628 if (VPBB == SplitPred) 9629 VPBB = SplitBlock; 9630 } 9631 } 9632 9633 // Now that sink-after is done, move induction recipes for optimized truncates 9634 // to the phi section of the header block. 9635 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9636 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9637 9638 // Adjust the recipes for any inloop reductions. 9639 adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start); 9640 9641 // Introduce a recipe to combine the incoming and previous values of a 9642 // first-order recurrence. 9643 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9644 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9645 if (!RecurPhi) 9646 continue; 9647 9648 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9649 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9650 auto *Region = GetReplicateRegion(PrevRecipe); 9651 if (Region) 9652 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9653 if (Region || PrevRecipe->isPhi()) 9654 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9655 else 9656 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9657 9658 auto *RecurSplice = cast<VPInstruction>( 9659 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9660 {RecurPhi, RecurPhi->getBackedgeValue()})); 9661 9662 RecurPhi->replaceAllUsesWith(RecurSplice); 9663 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9664 // all users. 9665 RecurSplice->setOperand(0, RecurPhi); 9666 } 9667 9668 // Interleave memory: for each Interleave Group we marked earlier as relevant 9669 // for this VPlan, replace the Recipes widening its memory instructions with a 9670 // single VPInterleaveRecipe at its insertion point. 
9671 for (auto IG : InterleaveGroups) { 9672 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9673 RecipeBuilder.getRecipe(IG->getInsertPos())); 9674 SmallVector<VPValue *, 4> StoredValues; 9675 for (unsigned i = 0; i < IG->getFactor(); ++i) 9676 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9677 auto *StoreR = 9678 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9679 StoredValues.push_back(StoreR->getStoredValue()); 9680 } 9681 9682 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9683 Recipe->getMask()); 9684 VPIG->insertBefore(Recipe); 9685 unsigned J = 0; 9686 for (unsigned i = 0; i < IG->getFactor(); ++i) 9687 if (Instruction *Member = IG->getMember(i)) { 9688 if (!Member->getType()->isVoidTy()) { 9689 VPValue *OriginalV = Plan->getVPValue(Member); 9690 Plan->removeVPValueFor(Member); 9691 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9692 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9693 J++; 9694 } 9695 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9696 } 9697 } 9698 9699 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9700 // in ways that accessing values using original IR values is incorrect. 9701 Plan->disableValue2VPValue(); 9702 9703 VPlanTransforms::sinkScalarOperands(*Plan); 9704 VPlanTransforms::mergeReplicateRegions(*Plan); 9705 9706 std::string PlanName; 9707 raw_string_ostream RSO(PlanName); 9708 ElementCount VF = Range.Start; 9709 Plan->addVF(VF); 9710 RSO << "Initial VPlan for VF={" << VF; 9711 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9712 Plan->addVF(VF); 9713 RSO << "," << VF; 9714 } 9715 RSO << "},UF>=1"; 9716 RSO.flush(); 9717 Plan->setName(PlanName); 9718 9719 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9720 return Plan; 9721 } 9722 9723 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9724 // Outer loop handling: They may require CFG and instruction level 9725 // transformations before even evaluating whether vectorization is profitable. 9726 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9727 // the vectorization pipeline. 9728 assert(!OrigLoop->isInnermost()); 9729 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9730 9731 // Create new empty VPlan 9732 auto Plan = std::make_unique<VPlan>(); 9733 9734 // Build hierarchical CFG 9735 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9736 HCFGBuilder.buildHierarchicalCFG(); 9737 9738 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9739 VF *= 2) 9740 Plan->addVF(VF); 9741 9742 if (EnableVPlanPredication) { 9743 VPlanPredicator VPP(*Plan); 9744 VPP.predicate(); 9745 9746 // Avoid running transformation to recipes until masked code generation in 9747 // VPlan-native path is in place. 9748 return Plan; 9749 } 9750 9751 SmallPtrSet<Instruction *, 1> DeadInstructions; 9752 VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan, 9753 Legal->getInductionVars(), 9754 DeadInstructions, *PSE.getSE()); 9755 return Plan; 9756 } 9757 9758 // Adjust the recipes for reductions. For in-loop reductions the chain of 9759 // instructions leading from the loop exit instr to the phi need to be converted 9760 // to reductions, with one operand being vector and the other being the scalar 9761 // reduction chain. For other reductions, a select is introduced between the phi 9762 // and live-out recipes when folding the tail. 
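// For a rough example (hypothetical source, simplified): in
//   for (i = 0; i < n; ++i) sum += a[i];
// the reduction chain is phi(sum) -> add -> back to phi. For an in-loop
// reduction the widened add is replaced below by a VPReductionRecipe taking
// the scalar chain value and the vector operand, so every vector iteration
// folds its lanes (optionally under a mask) into the scalar accumulator.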
9763 void LoopVectorizationPlanner::adjustRecipesForReductions( 9764 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, 9765 ElementCount MinVF) { 9766 for (auto &Reduction : CM.getInLoopReductionChains()) { 9767 PHINode *Phi = Reduction.first; 9768 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9769 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9770 9771 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) 9772 continue; 9773 9774 // ReductionOperations are ordered top-down from the phi's use to the 9775 // LoopExitValue. We keep track of the previous item (the Chain) to tell 9776 // which of the two operands will remain scalar and which will be reduced. 9777 // For minmax the chain will be the select instructions. 9778 Instruction *Chain = Phi; 9779 for (Instruction *R : ReductionOperations) { 9780 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9781 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9782 9783 VPValue *ChainOp = Plan->getVPValue(Chain); 9784 unsigned FirstOpId; 9785 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9786 "Only min/max recurrences allowed for inloop reductions"); 9787 // Recognize a call to the llvm.fmuladd intrinsic. 9788 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9789 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9790 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9791 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9792 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9793 "Expected to replace a VPWidenSelectSC"); 9794 FirstOpId = 1; 9795 } else { 9796 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9797 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9798 "Expected to replace a VPWidenSC"); 9799 FirstOpId = 0; 9800 } 9801 unsigned VecOpId = 9802 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9803 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9804 9805 auto *CondOp = CM.foldTailByMasking() 9806 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9807 : nullptr; 9808 9809 if (IsFMulAdd) { 9810 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9811 // need to create an fmul recipe to use as the vector operand for the 9812 // fadd reduction.
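// E.g. (sketch): sum = llvm.fmuladd(a[i], b[i], sum) is modelled as a widened
// fmul of a[i] and b[i] feeding an in-loop fadd reduction into sum; the
// FMulRecipe created below supplies that multiply as the vector operand.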
9813 VPInstruction *FMulRecipe = new VPInstruction( 9814 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9815 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9816 WidenRecipe->getParent()->insert(FMulRecipe, 9817 WidenRecipe->getIterator()); 9818 VecOp = FMulRecipe; 9819 } 9820 VPReductionRecipe *RedRecipe = 9821 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9822 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9823 Plan->removeVPValueFor(R); 9824 Plan->addVPValue(R, RedRecipe); 9825 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9826 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9827 WidenRecipe->eraseFromParent(); 9828 9829 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9830 VPRecipeBase *CompareRecipe = 9831 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9832 assert(isa<VPWidenRecipe>(CompareRecipe) && 9833 "Expected to replace a VPWidenSC"); 9834 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9835 "Expected no remaining users"); 9836 CompareRecipe->eraseFromParent(); 9837 } 9838 Chain = R; 9839 } 9840 } 9841 9842 // If tail is folded by masking, introduce selects between the phi 9843 // and the live-out instruction of each reduction, at the end of the latch. 9844 if (CM.foldTailByMasking()) { 9845 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9846 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9847 if (!PhiR || PhiR->isInLoop()) 9848 continue; 9849 Builder.setInsertPoint(LatchVPBB); 9850 VPValue *Cond = 9851 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9852 VPValue *Red = PhiR->getBackedgeValue(); 9853 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9854 } 9855 } 9856 } 9857 9858 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9859 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9860 VPSlotTracker &SlotTracker) const { 9861 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9862 IG->getInsertPos()->printAsOperand(O, false); 9863 O << ", "; 9864 getAddr()->printAsOperand(O, SlotTracker); 9865 VPValue *Mask = getMask(); 9866 if (Mask) { 9867 O << ", "; 9868 Mask->printAsOperand(O, SlotTracker); 9869 } 9870 9871 unsigned OpIdx = 0; 9872 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9873 if (!IG->getMember(i)) 9874 continue; 9875 if (getNumStoreOperands() > 0) { 9876 O << "\n" << Indent << " store "; 9877 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9878 O << " to index " << i; 9879 } else { 9880 O << "\n" << Indent << " "; 9881 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9882 O << " = load from index " << i; 9883 } 9884 ++OpIdx; 9885 } 9886 } 9887 #endif 9888 9889 void VPWidenCallRecipe::execute(VPTransformState &State) { 9890 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9891 *this, State); 9892 } 9893 9894 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9895 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 9896 this, *this, InvariantCond, State); 9897 } 9898 9899 void VPWidenRecipe::execute(VPTransformState &State) { 9900 State.ILV->widenInstruction(*getUnderlyingInstr(), this, State); 9901 } 9902 9903 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9904 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 9905 *this, State.UF, State.VF, IsPtrLoopInvariant, 9906 IsIndexLoopInvariant, State); 9907 } 9908 
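// Widening an induction yields, per unrolled part, a vector of consecutive
// lane values; roughly (sketch for VF=4, unit step): %vec.ind = <i, i+1, i+2,
// i+3>, advanced by 4 on each vector iteration when UF is 1.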
9909 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9910 assert(!State.Instance && "Int or FP induction being replicated."); 9911 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9912 getTruncInst(), getVPValue(0), 9913 getCastValue(), State); 9914 } 9915 9916 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9917 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9918 State); 9919 } 9920 9921 void VPBlendRecipe::execute(VPTransformState &State) { 9922 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9923 // We know that all PHIs in non-header blocks are converted into 9924 // selects, so we don't have to worry about the insertion order and we 9925 // can just use the builder. 9926 // At this point we generate the predication tree. There may be 9927 // duplications since this is a simple recursive scan, but future 9928 // optimizations will clean it up. 9929 9930 unsigned NumIncoming = getNumIncomingValues(); 9931 9932 // Generate a sequence of selects of the form: 9933 // SELECT(Mask3, In3, 9934 // SELECT(Mask2, In2, 9935 // SELECT(Mask1, In1, 9936 // In0))) 9937 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9938 // are essentially undef are taken from In0. 9939 InnerLoopVectorizer::VectorParts Entry(State.UF); 9940 for (unsigned In = 0; In < NumIncoming; ++In) { 9941 for (unsigned Part = 0; Part < State.UF; ++Part) { 9942 // We might have single edge PHIs (blocks) - use an identity 9943 // 'select' for the first PHI operand. 9944 Value *In0 = State.get(getIncomingValue(In), Part); 9945 if (In == 0) 9946 Entry[Part] = In0; // Initialize with the first incoming value. 9947 else { 9948 // Select between the current value and the previous incoming edge 9949 // based on the incoming mask. 9950 Value *Cond = State.get(getMask(In), Part); 9951 Entry[Part] = 9952 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9953 } 9954 } 9955 } 9956 for (unsigned Part = 0; Part < State.UF; ++Part) 9957 State.set(this, Entry[Part], Part); 9958 } 9959 9960 void VPInterleaveRecipe::execute(VPTransformState &State) { 9961 assert(!State.Instance && "Interleave group being replicated."); 9962 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9963 getStoredValues(), getMask()); 9964 } 9965 9966 void VPReductionRecipe::execute(VPTransformState &State) { 9967 assert(!State.Instance && "Reduction being replicated."); 9968 Value *PrevInChain = State.get(getChainOp(), 0); 9969 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9970 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9971 // Propagate the fast-math flags carried by the underlying instruction. 
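// For masked (tail-folded) reductions the loop below first selects between
// the real vector operand and a splat of the reduction identity, e.g. for an
// add reduction (sketch): select <mask>, %vec.op, <0, 0, ..., 0>, so that
// inactive lanes cannot perturb the result.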
9972 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9973 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9974 for (unsigned Part = 0; Part < State.UF; ++Part) { 9975 Value *NewVecOp = State.get(getVecOp(), Part); 9976 if (VPValue *Cond = getCondOp()) { 9977 Value *NewCond = State.get(Cond, Part); 9978 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9979 Value *Iden = RdxDesc->getRecurrenceIdentity( 9980 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9981 Value *IdenVec = 9982 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9983 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9984 NewVecOp = Select; 9985 } 9986 Value *NewRed; 9987 Value *NextInChain; 9988 if (IsOrdered) { 9989 if (State.VF.isVector()) 9990 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9991 PrevInChain); 9992 else 9993 NewRed = State.Builder.CreateBinOp( 9994 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9995 NewVecOp); 9996 PrevInChain = NewRed; 9997 } else { 9998 PrevInChain = State.get(getChainOp(), Part); 9999 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 10000 } 10001 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 10002 NextInChain = 10003 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 10004 NewRed, PrevInChain); 10005 } else if (IsOrdered) 10006 NextInChain = NewRed; 10007 else 10008 NextInChain = State.Builder.CreateBinOp( 10009 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 10010 PrevInChain); 10011 State.set(this, NextInChain, Part); 10012 } 10013 } 10014 10015 void VPReplicateRecipe::execute(VPTransformState &State) { 10016 if (State.Instance) { // Generate a single instance. 10017 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 10018 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 10019 IsPredicated, State); 10020 // Insert scalar instance packing it into a vector. 10021 if (AlsoPack && State.VF.isVector()) { 10022 // If we're constructing lane 0, initialize to start from poison. 10023 if (State.Instance->Lane.isFirstLane()) { 10024 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 10025 Value *Poison = PoisonValue::get( 10026 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 10027 State.set(this, Poison, State.Instance->Part); 10028 } 10029 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 10030 } 10031 return; 10032 } 10033 10034 // Generate scalar instances for all VF lanes of all UF parts, unless the 10035 // instruction is uniform, in which case generate only the first lane for each 10036 // of the UF parts. 10037 unsigned EndLane = IsUniform ?
1 : State.VF.getKnownMinValue(); 10038 assert((!State.VF.isScalable() || IsUniform) && 10039 "Can't scalarize a scalable vector"); 10040 for (unsigned Part = 0; Part < State.UF; ++Part) 10041 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 10042 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 10043 VPIteration(Part, Lane), IsPredicated, 10044 State); 10045 } 10046 10047 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 10048 assert(State.Instance && "Branch on Mask works only on single instance."); 10049 10050 unsigned Part = State.Instance->Part; 10051 unsigned Lane = State.Instance->Lane.getKnownLane(); 10052 10053 Value *ConditionBit = nullptr; 10054 VPValue *BlockInMask = getMask(); 10055 if (BlockInMask) { 10056 ConditionBit = State.get(BlockInMask, Part); 10057 if (ConditionBit->getType()->isVectorTy()) 10058 ConditionBit = State.Builder.CreateExtractElement( 10059 ConditionBit, State.Builder.getInt32(Lane)); 10060 } else // Block in mask is all-one. 10061 ConditionBit = State.Builder.getTrue(); 10062 10063 // Replace the temporary unreachable terminator with a new conditional branch, 10064 // whose two destinations will be set later when they are created. 10065 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 10066 assert(isa<UnreachableInst>(CurrentTerminator) && 10067 "Expected to replace unreachable terminator with conditional branch."); 10068 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 10069 CondBr->setSuccessor(0, nullptr); 10070 ReplaceInstWithInst(CurrentTerminator, CondBr); 10071 } 10072 10073 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 10074 assert(State.Instance && "Predicated instruction PHI works per instance."); 10075 Instruction *ScalarPredInst = 10076 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 10077 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 10078 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 10079 assert(PredicatingBB && "Predicated block has no single predecessor."); 10080 assert(isa<VPReplicateRecipe>(getOperand(0)) && 10081 "operand must be VPReplicateRecipe"); 10082 10083 // By current pack/unpack logic we need to generate only a single phi node: if 10084 // a vector value for the predicated instruction exists at this point it means 10085 // the instruction has vector users only, and a phi for the vector value is 10086 // needed. In this case the recipe of the predicated instruction is marked to 10087 // also do that packing, thereby "hoisting" the insert-element sequence. 10088 // Otherwise, a phi node for the scalar value is needed. 10089 unsigned Part = State.Instance->Part; 10090 if (State.hasVectorValue(getOperand(0), Part)) { 10091 Value *VectorValue = State.get(getOperand(0), Part); 10092 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 10093 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 10094 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 10095 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 10096 if (State.hasVectorValue(this, Part)) 10097 State.reset(this, VPhi, Part); 10098 else 10099 State.set(this, VPhi, Part); 10100 // NOTE: Currently we need to update the value of the operand, so the next 10101 // predicated iteration inserts its generated value in the correct vector. 
10102 State.reset(getOperand(0), VPhi, Part); 10103 } else { 10104 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 10105 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 10106 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 10107 PredicatingBB); 10108 Phi->addIncoming(ScalarPredInst, PredicatedBB); 10109 if (State.hasScalarValue(this, *State.Instance)) 10110 State.reset(this, Phi, *State.Instance); 10111 else 10112 State.set(this, Phi, *State.Instance); 10113 // NOTE: Currently we need to update the value of the operand, so the next 10114 // predicated iteration inserts its generated value in the correct vector. 10115 State.reset(getOperand(0), Phi, *State.Instance); 10116 } 10117 } 10118 10119 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 10120 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 10121 State.ILV->vectorizeMemoryInstruction( 10122 &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), 10123 StoredValue, getMask(), Consecutive, Reverse); 10124 } 10125 10126 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10127 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10128 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10129 // for predication. 10130 static ScalarEpilogueLowering getScalarEpilogueLowering( 10131 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10132 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10133 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10134 LoopVectorizationLegality &LVL) { 10135 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10136 // don't look at hints or options, and don't request a scalar epilogue. 10137 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10138 // LoopAccessInfo (due to code dependency and not being able to reliably get 10139 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10140 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10141 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10142 // back to the old way and vectorize with versioning when forced. See D81345.) 10143 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10144 PGSOQueryType::IRPass) && 10145 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10146 return CM_ScalarEpilogueNotAllowedOptSize; 10147 10148 // 2) If set, obey the directives 10149 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10150 switch (PreferPredicateOverEpilogue) { 10151 case PreferPredicateTy::ScalarEpilogue: 10152 return CM_ScalarEpilogueAllowed; 10153 case PreferPredicateTy::PredicateElseScalarEpilogue: 10154 return CM_ScalarEpilogueNotNeededUsePredicate; 10155 case PreferPredicateTy::PredicateOrDontVectorize: 10156 return CM_ScalarEpilogueNotAllowedUsePredicate; 10157 }; 10158 } 10159 10160 // 3) If set, obey the hints 10161 switch (Hints.getPredicate()) { 10162 case LoopVectorizeHints::FK_Enabled: 10163 return CM_ScalarEpilogueNotNeededUsePredicate; 10164 case LoopVectorizeHints::FK_Disabled: 10165 return CM_ScalarEpilogueAllowed; 10166 }; 10167 10168 // 4) if the TTI hook indicates this is profitable, request predication. 
10169 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10170 LVL.getLAI())) 10171 return CM_ScalarEpilogueNotNeededUsePredicate; 10172 10173 return CM_ScalarEpilogueAllowed; 10174 } 10175 10176 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10177 // If Values have been set for this Def return the one relevant for \p Part. 10178 if (hasVectorValue(Def, Part)) 10179 return Data.PerPartOutput[Def][Part]; 10180 10181 if (!hasScalarValue(Def, {Part, 0})) { 10182 Value *IRV = Def->getLiveInIRValue(); 10183 Value *B = ILV->getBroadcastInstrs(IRV); 10184 set(Def, B, Part); 10185 return B; 10186 } 10187 10188 Value *ScalarValue = get(Def, {Part, 0}); 10189 // If we aren't vectorizing, we can just copy the scalar map values over 10190 // to the vector map. 10191 if (VF.isScalar()) { 10192 set(Def, ScalarValue, Part); 10193 return ScalarValue; 10194 } 10195 10196 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10197 bool IsUniform = RepR && RepR->isUniform(); 10198 10199 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10200 // Check if there is a scalar value for the selected lane. 10201 if (!hasScalarValue(Def, {Part, LastLane})) { 10202 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 10203 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 10204 "unexpected recipe found to be invariant"); 10205 IsUniform = true; 10206 LastLane = 0; 10207 } 10208 10209 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10210 // Set the insert point after the last scalarized instruction or after the 10211 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10212 // will directly follow the scalar definitions. 10213 auto OldIP = Builder.saveIP(); 10214 auto NewIP = 10215 isa<PHINode>(LastInst) 10216 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10217 : std::next(BasicBlock::iterator(LastInst)); 10218 Builder.SetInsertPoint(&*NewIP); 10219 10220 // However, if we are vectorizing, we need to construct the vector values. 10221 // If the value is known to be uniform after vectorization, we can just 10222 // broadcast the scalar value corresponding to lane zero for each unroll 10223 // iteration. Otherwise, we construct the vector values using 10224 // insertelement instructions. Since the resulting vectors are stored in 10225 // State, we will only generate the insertelements once. 10226 Value *VectorValue = nullptr; 10227 if (IsUniform) { 10228 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10229 set(Def, VectorValue, Part); 10230 } else { 10231 // Initialize packing with insertelements to start from undef. 10232 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10233 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10234 set(Def, Undef, Part); 10235 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10236 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10237 VectorValue = get(Def, Part); 10238 } 10239 Builder.restoreIP(OldIP); 10240 return VectorValue; 10241 } 10242 10243 // Process the loop in the VPlan-native vectorization path. This path builds 10244 // VPlan upfront in the vectorization pipeline, which allows to apply 10245 // VPlan-to-VPlan transformations from the very beginning without modifying the 10246 // input LLVM IR. 
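// For instance (hypothetical input), an outer loop explicitly annotated with
//   #pragma clang loop vectorize(enable)
// wrapping an inner loop is only considered on this path, and only when
// EnableVPlanNativePath is set; the regular inner-loop path never sees
// non-innermost loops.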
10247 static bool processLoopInVPlanNativePath( 10248 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10249 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10250 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10251 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10252 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10253 LoopVectorizationRequirements &Requirements) { 10254 10255 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10256 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10257 return false; 10258 } 10259 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10260 Function *F = L->getHeader()->getParent(); 10261 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10262 10263 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10264 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10265 10266 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10267 &Hints, IAI); 10268 // Use the planner for outer loop vectorization. 10269 // TODO: CM is not used at this point inside the planner. Turn CM into an 10270 // optional argument if we don't need it in the future. 10271 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10272 Requirements, ORE); 10273 10274 // Get user vectorization factor. 10275 ElementCount UserVF = Hints.getWidth(); 10276 10277 CM.collectElementTypesForWidening(); 10278 10279 // Plan how to best vectorize, return the best VF and its cost. 10280 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10281 10282 // If we are stress testing VPlan builds, do not attempt to generate vector 10283 // code. Masked vector code generation support will follow soon. 10284 // Also, do not attempt to vectorize if no vector code will be produced. 10285 if (VPlanBuildStressTest || EnableVPlanPredication || 10286 VectorizationFactor::Disabled() == VF) 10287 return false; 10288 10289 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10290 10291 { 10292 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10293 F->getParent()->getDataLayout()); 10294 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10295 &CM, BFI, PSI, Checks); 10296 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10297 << L->getHeader()->getParent()->getName() << "\"\n"); 10298 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10299 } 10300 10301 // Mark the loop as already vectorized to avoid vectorizing again. 10302 Hints.setAlreadyVectorized(); 10303 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10304 return true; 10305 } 10306 10307 // Emit a remark if there are stores to floats that required a floating point 10308 // extension. If the vectorized loop was generated with floating point there 10309 // will be a performance penalty from the conversion overhead and the change in 10310 // the vector width. 10311 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10312 SmallVector<Instruction *, 4> Worklist; 10313 for (BasicBlock *BB : L->getBlocks()) { 10314 for (Instruction &Inst : *BB) { 10315 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10316 if (S->getValueOperand()->getType()->isFloatTy()) 10317 Worklist.push_back(S); 10318 } 10319 } 10320 } 10321 10322 // Traverse the floating point stores upwards searching, for floating point 10323 // conversions. 
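// A typical pattern this flags (hypothetical source, simplified):
//   double d = (double)f[i] * 3.14159;  // fpext float -> double
//   out[i]   = (float)d;                // store of a float value
// The fpext forces the vectorized loop to mix <N x float> with <N x double>
// operations, effectively halving the vector width on the double side.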
10324 SmallPtrSet<const Instruction *, 4> Visited; 10325 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10326 while (!Worklist.empty()) { 10327 auto *I = Worklist.pop_back_val(); 10328 if (!L->contains(I)) 10329 continue; 10330 if (!Visited.insert(I).second) 10331 continue; 10332 10333 // Emit a remark if the floating point store required a floating 10334 // point conversion. 10335 // TODO: More work could be done to identify the root cause such as a 10336 // constant or a function return type and point the user to it. 10337 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10338 ORE->emit([&]() { 10339 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10340 I->getDebugLoc(), L->getHeader()) 10341 << "floating point conversion changes vector width. " 10342 << "Mixed floating point precision requires an up/down " 10343 << "cast that will negatively impact performance."; 10344 }); 10345 10346 for (Use &Op : I->operands()) 10347 if (auto *OpI = dyn_cast<Instruction>(Op)) 10348 Worklist.push_back(OpI); 10349 } 10350 } 10351 10352 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10353 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10354 !EnableLoopInterleaving), 10355 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10356 !EnableLoopVectorization) {} 10357 10358 bool LoopVectorizePass::processLoop(Loop *L) { 10359 assert((EnableVPlanNativePath || L->isInnermost()) && 10360 "VPlan-native path is not enabled. Only process inner loops."); 10361 10362 #ifndef NDEBUG 10363 const std::string DebugLocStr = getDebugLocString(L); 10364 #endif /* NDEBUG */ 10365 10366 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10367 << L->getHeader()->getParent()->getName() << "\" from " 10368 << DebugLocStr << "\n"); 10369 10370 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 10371 10372 LLVM_DEBUG( 10373 dbgs() << "LV: Loop hints:" 10374 << " force=" 10375 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10376 ? "disabled" 10377 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10378 ? "enabled" 10379 : "?")) 10380 << " width=" << Hints.getWidth() 10381 << " interleave=" << Hints.getInterleave() << "\n"); 10382 10383 // Function containing loop 10384 Function *F = L->getHeader()->getParent(); 10385 10386 // Looking at the diagnostic output is the only way to determine if a loop 10387 // was vectorized (other than looking at the IR or machine code), so it 10388 // is important to generate an optimization remark for each loop. Most of 10389 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10390 // generated as OptimizationRemark and OptimizationRemarkMissed are 10391 // less verbose reporting vectorized loops and unvectorized loops that may 10392 // benefit from vectorization, respectively. 10393 10394 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10395 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10396 return false; 10397 } 10398 10399 PredicatedScalarEvolution PSE(*SE, *L); 10400 10401 // Check if it is legal to vectorize the loop. 
10402 LoopVectorizationRequirements Requirements; 10403 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10404 &Requirements, &Hints, DB, AC, BFI, PSI); 10405 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10406 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10407 Hints.emitRemarkWithHints(); 10408 return false; 10409 } 10410 10411 // Check the function attributes and profiles to find out if this function 10412 // should be optimized for size. 10413 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10414 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10415 10416 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10417 // here. They may require CFG and instruction level transformations before 10418 // even evaluating whether vectorization is profitable. Since we cannot modify 10419 // the incoming IR, we need to build VPlan upfront in the vectorization 10420 // pipeline. 10421 if (!L->isInnermost()) 10422 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10423 ORE, BFI, PSI, Hints, Requirements); 10424 10425 assert(L->isInnermost() && "Inner loop expected."); 10426 10427 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10428 // count by optimizing for size, to minimize overheads. 10429 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10430 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10431 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10432 << "This loop is worth vectorizing only if no scalar " 10433 << "iteration overheads are incurred."); 10434 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10435 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10436 else { 10437 LLVM_DEBUG(dbgs() << "\n"); 10438 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10439 } 10440 } 10441 10442 // Check the function attributes to see if implicit floats are allowed. 10443 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10444 // an integer loop and the vector instructions selected are purely integer 10445 // vector instructions? 10446 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10447 reportVectorizationFailure( 10448 "Can't vectorize when the NoImplicitFloat attribute is used", 10449 "loop not vectorized due to NoImplicitFloat attribute", 10450 "NoImplicitFloat", ORE, L); 10451 Hints.emitRemarkWithHints(); 10452 return false; 10453 } 10454 10455 // Check if the target supports potentially unsafe FP vectorization. 10456 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10457 // for the target we're vectorizing for, to make sure none of the 10458 // additional fp-math flags can help. 10459 if (Hints.isPotentiallyUnsafe() && 10460 TTI->isFPVectorizationPotentiallyUnsafe()) { 10461 reportVectorizationFailure( 10462 "Potentially unsafe FP op prevents vectorization", 10463 "loop not vectorized due to unsafe FP support.", 10464 "UnsafeFP", ORE, L); 10465 Hints.emitRemarkWithHints(); 10466 return false; 10467 } 10468 10469 bool AllowOrderedReductions; 10470 // If the flag is set, use that instead and override the TTI behaviour. 
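// (Ordered reductions here are strict, in-order FP reductions: lanes are
// folded into the scalar accumulator in original program order rather than
// via a reassociating vector reduction, preserving exact FP semantics.)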
10471 if (ForceOrderedReductions.getNumOccurrences() > 0) 10472 AllowOrderedReductions = ForceOrderedReductions; 10473 else 10474 AllowOrderedReductions = TTI->enableOrderedReductions(); 10475 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10476 ORE->emit([&]() { 10477 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10478 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10479 ExactFPMathInst->getDebugLoc(), 10480 ExactFPMathInst->getParent()) 10481 << "loop not vectorized: cannot prove it is safe to reorder " 10482 "floating-point operations"; 10483 }); 10484 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10485 "reorder floating-point operations\n"); 10486 Hints.emitRemarkWithHints(); 10487 return false; 10488 } 10489 10490 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10491 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10492 10493 // If an override option has been passed in for interleaved accesses, use it. 10494 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10495 UseInterleaved = EnableInterleavedMemAccesses; 10496 10497 // Analyze interleaved memory accesses. 10498 if (UseInterleaved) { 10499 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10500 } 10501 10502 // Use the cost model. 10503 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10504 F, &Hints, IAI); 10505 CM.collectValuesToIgnore(); 10506 CM.collectElementTypesForWidening(); 10507 10508 // Use the planner for vectorization. 10509 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10510 Requirements, ORE); 10511 10512 // Get user vectorization factor and interleave count. 10513 ElementCount UserVF = Hints.getWidth(); 10514 unsigned UserIC = Hints.getInterleave(); 10515 10516 // Plan how to best vectorize, return the best VF and its cost. 10517 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10518 10519 VectorizationFactor VF = VectorizationFactor::Disabled(); 10520 unsigned IC = 1; 10521 10522 if (MaybeVF) { 10523 VF = *MaybeVF; 10524 // Select the interleave count. 10525 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10526 } 10527 10528 // Identify the diagnostic messages that should be produced. 10529 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10530 bool VectorizeLoop = true, InterleaveLoop = true; 10531 if (VF.Width.isScalar()) { 10532 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10533 VecDiagMsg = std::make_pair( 10534 "VectorizationNotBeneficial", 10535 "the cost-model indicates that vectorization is not beneficial"); 10536 VectorizeLoop = false; 10537 } 10538 10539 if (!MaybeVF && UserIC > 1) { 10540 // Tell the user interleaving was avoided up-front, despite being explicitly 10541 // requested. 10542 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10543 "interleaving should be avoided up front\n"); 10544 IntDiagMsg = std::make_pair( 10545 "InterleavingAvoided", 10546 "Ignoring UserIC, because interleaving was avoided up front"); 10547 InterleaveLoop = false; 10548 } else if (IC == 1 && UserIC <= 1) { 10549 // Tell the user interleaving is not beneficial. 
10550 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 10551 IntDiagMsg = std::make_pair( 10552 "InterleavingNotBeneficial", 10553 "the cost-model indicates that interleaving is not beneficial"); 10554 InterleaveLoop = false; 10555 if (UserIC == 1) { 10556 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 10557 IntDiagMsg.second += 10558 " and is explicitly disabled or interleave count is set to 1"; 10559 } 10560 } else if (IC > 1 && UserIC == 1) { 10561 // Tell the user interleaving is beneficial, but it is explicitly disabled. 10562 LLVM_DEBUG( 10563 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 10564 IntDiagMsg = std::make_pair( 10565 "InterleavingBeneficialButDisabled", 10566 "the cost-model indicates that interleaving is beneficial " 10567 "but is explicitly disabled or interleave count is set to 1"); 10568 InterleaveLoop = false; 10569 } 10570 10571 // Override IC if user provided an interleave count. 10572 IC = UserIC > 0 ? UserIC : IC; 10573 10574 // Emit diagnostic messages, if any. 10575 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 10576 if (!VectorizeLoop && !InterleaveLoop) { 10577 // Do not vectorize or interleave the loop. 10578 ORE->emit([&]() { 10579 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10580 L->getStartLoc(), L->getHeader()) 10581 << VecDiagMsg.second; 10582 }); 10583 ORE->emit([&]() { 10584 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10585 L->getStartLoc(), L->getHeader()) 10586 << IntDiagMsg.second; 10587 }); 10588 return false; 10589 } else if (!VectorizeLoop && InterleaveLoop) { 10590 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10591 ORE->emit([&]() { 10592 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10593 L->getStartLoc(), L->getHeader()) 10594 << VecDiagMsg.second; 10595 }); 10596 } else if (VectorizeLoop && !InterleaveLoop) { 10597 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10598 << ") in " << DebugLocStr << '\n'); 10599 ORE->emit([&]() { 10600 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10601 L->getStartLoc(), L->getHeader()) 10602 << IntDiagMsg.second; 10603 }); 10604 } else if (VectorizeLoop && InterleaveLoop) { 10605 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10606 << ") in " << DebugLocStr << '\n'); 10607 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10608 } 10609 10610 bool DisableRuntimeUnroll = false; 10611 MDNode *OrigLoopID = L->getLoopID(); 10612 { 10613 // Optimistically generate runtime checks. Drop them if they turn out to not 10614 // be profitable. Limit the scope of Checks, so the cleanup happens 10615 // immediately after vector codegeneration is done. 10616 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10617 F->getParent()->getDataLayout()); 10618 if (!VF.Width.isScalar() || IC > 1) 10619 Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); 10620 10621 using namespace ore; 10622 if (!VectorizeLoop) { 10623 assert(IC > 1 && "interleave count should not be 1 or 0"); 10624 // If we decided that it is not legal to vectorize the loop, then 10625 // interleave it.
10626 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10627 &CM, BFI, PSI, Checks); 10628 10629 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10630 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10631 10632 ORE->emit([&]() { 10633 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10634 L->getHeader()) 10635 << "interleaved loop (interleaved count: " 10636 << NV("InterleaveCount", IC) << ")"; 10637 }); 10638 } else { 10639 // If we decided that it is *legal* to vectorize the loop, then do it. 10640 10641 // Consider vectorizing the epilogue too if it's profitable. 10642 VectorizationFactor EpilogueVF = 10643 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10644 if (EpilogueVF.Width.isVector()) { 10645 10646 // The first pass vectorizes the main loop and creates a scalar epilogue 10647 // to be vectorized by executing the plan (potentially with a different 10648 // factor) again shortly afterwards. 10649 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10650 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10651 EPI, &LVL, &CM, BFI, PSI, Checks); 10652 10653 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10654 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10655 DT); 10656 ++LoopsVectorized; 10657 10658 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10659 formLCSSARecursively(*L, *DT, LI, SE); 10660 10661 // Second pass vectorizes the epilogue and adjusts the control flow 10662 // edges from the first pass. 10663 EPI.MainLoopVF = EPI.EpilogueVF; 10664 EPI.MainLoopUF = EPI.EpilogueUF; 10665 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10666 ORE, EPI, &LVL, &CM, BFI, PSI, 10667 Checks); 10668 10669 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10670 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10671 DT); 10672 ++LoopsEpilogueVectorized; 10673 10674 if (!MainILV.areSafetyChecksAdded()) 10675 DisableRuntimeUnroll = true; 10676 } else { 10677 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10678 &LVL, &CM, BFI, PSI, Checks); 10679 10680 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10681 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10682 ++LoopsVectorized; 10683 10684 // Add metadata to disable runtime unrolling a scalar loop when there 10685 // are no runtime checks about strides and memory. A scalar loop that is 10686 // rarely used is not worth unrolling. 10687 if (!LB.areSafetyChecksAdded()) 10688 DisableRuntimeUnroll = true; 10689 } 10690 // Report the vectorization decision. 10691 ORE->emit([&]() { 10692 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10693 L->getHeader()) 10694 << "vectorized loop (vectorization width: " 10695 << NV("VectorizationFactor", VF.Width) 10696 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10697 }); 10698 } 10699 10700 if (ORE->allowExtraAnalysis(LV_NAME)) 10701 checkMixedPrecision(L, ORE); 10702 } 10703 10704 Optional<MDNode *> RemainderLoopID = 10705 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10706 LLVMLoopVectorizeFollowupEpilogue}); 10707 if (RemainderLoopID.hasValue()) { 10708 L->setLoopID(RemainderLoopID.getValue()); 10709 } else { 10710 if (DisableRuntimeUnroll) 10711 AddRuntimeUnrollDisableMetaData(L); 10712 10713 // Mark the loop as already vectorized to avoid vectorizing again. 
10714 Hints.setAlreadyVectorized(); 10715 } 10716 10717 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10718 return true; 10719 } 10720 10721 LoopVectorizeResult LoopVectorizePass::runImpl( 10722 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10723 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10724 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10725 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10726 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10727 SE = &SE_; 10728 LI = &LI_; 10729 TTI = &TTI_; 10730 DT = &DT_; 10731 BFI = &BFI_; 10732 TLI = TLI_; 10733 AA = &AA_; 10734 AC = &AC_; 10735 GetLAA = &GetLAA_; 10736 DB = &DB_; 10737 ORE = &ORE_; 10738 PSI = PSI_; 10739 10740 // Don't attempt if 10741 // 1. the target claims to have no vector registers, and 10742 // 2. interleaving won't help ILP. 10743 // 10744 // The second condition is necessary because, even if the target has no 10745 // vector registers, loop vectorization may still enable scalar 10746 // interleaving. 10747 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10748 TTI->getMaxInterleaveFactor(1) < 2) 10749 return LoopVectorizeResult(false, false); 10750 10751 bool Changed = false, CFGChanged = false; 10752 10753 // The vectorizer requires loops to be in simplified form. 10754 // Since simplification may add new inner loops, it has to run before the 10755 // legality and profitability checks. This means running the loop vectorizer 10756 // will simplify all loops, regardless of whether anything ends up being 10757 // vectorized. 10758 for (auto &L : *LI) 10759 Changed |= CFGChanged |= 10760 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10761 10762 // Build up a worklist of inner-loops to vectorize. This is necessary as 10763 // the act of vectorizing or partially unrolling a loop creates new loops 10764 // and can invalidate iterators across the loops. 10765 SmallVector<Loop *, 8> Worklist; 10766 10767 for (Loop *L : *LI) 10768 collectSupportedLoops(*L, LI, ORE, Worklist); 10769 10770 LoopsAnalyzed += Worklist.size(); 10771 10772 // Now walk the identified inner loops. 10773 while (!Worklist.empty()) { 10774 Loop *L = Worklist.pop_back_val(); 10775 10776 // For the inner loops we actually process, form LCSSA to simplify the 10777 // transform. 10778 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10779 10780 Changed |= CFGChanged |= processLoop(L); 10781 } 10782 10783 // Process each loop nest in the function.
10784 return LoopVectorizeResult(Changed, CFGChanged); 10785 } 10786 10787 PreservedAnalyses LoopVectorizePass::run(Function &F, 10788 FunctionAnalysisManager &AM) { 10789 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10790 auto &LI = AM.getResult<LoopAnalysis>(F); 10791 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10792 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10793 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10794 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10795 auto &AA = AM.getResult<AAManager>(F); 10796 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10797 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10798 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10799 10800 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10801 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10802 [&](Loop &L) -> const LoopAccessInfo & { 10803 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10804 TLI, TTI, nullptr, nullptr, nullptr}; 10805 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10806 }; 10807 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10808 ProfileSummaryInfo *PSI = 10809 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10810 LoopVectorizeResult Result = 10811 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10812 if (!Result.MadeAnyChange) 10813 return PreservedAnalyses::all(); 10814 PreservedAnalyses PA; 10815 10816 // We currently do not preserve loopinfo/dominator analyses with outer loop 10817 // vectorization. Until this is addressed, mark these analyses as preserved 10818 // only for non-VPlan-native path. 10819 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 10820 if (!EnableVPlanNativePath) { 10821 PA.preserve<LoopAnalysis>(); 10822 PA.preserve<DominatorTreeAnalysis>(); 10823 } 10824 if (!Result.MadeCFGChange) 10825 PA.preserveSet<CFGAnalyses>(); 10826 return PA; 10827 } 10828 10829 void LoopVectorizePass::printPipeline( 10830 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 10831 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline( 10832 OS, MapClassName2PassName); 10833 10834 OS << "<"; 10835 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;"; 10836 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;"; 10837 OS << ">"; 10838 } 10839