//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one. See the illustrative sketch at
// the end of this header comment.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
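//
// As a purely illustrative sketch (pseudo-code, not taken from any particular
// input), a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + 42;
//
// is, for a vectorization factor (VF) of 4, rewritten so that the induction
// variable advances by 4 and the body operates on 4-element vectors, with a
// scalar remainder loop for the iterations that do not fill a whole vector:
//
//   for (i = 0; i + 4 <= n; i += 4)
//     a[i:i+3] = b[i:i+3] + 42;   // one 'wide' iteration
//   for (; i < n; ++i)            // scalar epilogue
//     a[i] = b[i] + 42;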
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
"llvm/Transforms/Utils/SizeOpts.h" 144 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 145 #include <algorithm> 146 #include <cassert> 147 #include <cstdint> 148 #include <cstdlib> 149 #include <functional> 150 #include <iterator> 151 #include <limits> 152 #include <memory> 153 #include <string> 154 #include <tuple> 155 #include <utility> 156 157 using namespace llvm; 158 159 #define LV_NAME "loop-vectorize" 160 #define DEBUG_TYPE LV_NAME 161 162 #ifndef NDEBUG 163 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 164 #endif 165 166 /// @{ 167 /// Metadata attribute names 168 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 169 const char LLVMLoopVectorizeFollowupVectorized[] = 170 "llvm.loop.vectorize.followup_vectorized"; 171 const char LLVMLoopVectorizeFollowupEpilogue[] = 172 "llvm.loop.vectorize.followup_epilogue"; 173 /// @} 174 175 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 176 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 177 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 178 179 static cl::opt<bool> EnableEpilogueVectorization( 180 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 181 cl::desc("Enable vectorization of epilogue loops.")); 182 183 static cl::opt<unsigned> EpilogueVectorizationForceVF( 184 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 185 cl::desc("When epilogue vectorization is enabled, and a value greater than " 186 "1 is specified, forces the given VF for all applicable epilogue " 187 "loops.")); 188 189 static cl::opt<unsigned> EpilogueVectorizationMinVF( 190 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 191 cl::desc("Only loops with vectorization factor equal to or larger than " 192 "the specified value are considered for epilogue vectorization.")); 193 194 /// Loops with a known constant trip count below this number are vectorized only 195 /// if no scalar iteration overheads are incurred. 196 static cl::opt<unsigned> TinyTripCountVectorThreshold( 197 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 198 cl::desc("Loops with a constant trip count that is smaller than this " 199 "value are vectorized only if no scalar iteration overheads " 200 "are incurred.")); 201 202 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 203 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 204 cl::desc("The maximum allowed number of runtime memory checks with a " 205 "vectorize(enable) pragma.")); 206 207 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 208 // that predication is preferred, and this lists all options. I.e., the 209 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 210 // and predicate the instructions accordingly. 
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
Mostly " 280 "useful for getting consistent testing.")); 281 282 static cl::opt<bool> ForceTargetSupportsScalableVectors( 283 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 284 cl::desc( 285 "Pretend that scalable vectors are supported, even if the target does " 286 "not support them. This flag should only be used for testing.")); 287 288 static cl::opt<unsigned> SmallLoopCost( 289 "small-loop-cost", cl::init(20), cl::Hidden, 290 cl::desc( 291 "The cost of a loop that is considered 'small' by the interleaver.")); 292 293 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 294 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 295 cl::desc("Enable the use of the block frequency analysis to access PGO " 296 "heuristics minimizing code growth in cold regions and being more " 297 "aggressive in hot regions.")); 298 299 // Runtime interleave loops for load/store throughput. 300 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 301 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 302 cl::desc( 303 "Enable runtime interleaving until load/store ports are saturated")); 304 305 /// Interleave small loops with scalar reductions. 306 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 307 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 308 cl::desc("Enable interleaving for loops with small iteration counts that " 309 "contain scalar reductions to expose ILP.")); 310 311 /// The number of stores in a loop that are allowed to need predication. 312 static cl::opt<unsigned> NumberOfStoresToPredicate( 313 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 314 cl::desc("Max number of stores to be predicated behind an if.")); 315 316 static cl::opt<bool> EnableIndVarRegisterHeur( 317 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 318 cl::desc("Count the induction variable only once when interleaving")); 319 320 static cl::opt<bool> EnableCondStoresVectorization( 321 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 322 cl::desc("Enable if predication of stores during vectorization.")); 323 324 static cl::opt<unsigned> MaxNestedScalarReductionIC( 325 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 326 cl::desc("The maximum interleave count to use when interleaving a scalar " 327 "reduction in a nested loop.")); 328 329 static cl::opt<bool> 330 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 331 cl::Hidden, 332 cl::desc("Prefer in-loop vector reductions, " 333 "overriding the targets preference.")); 334 335 // FIXME: When loop hints are passed which allow reordering of FP operations, 336 // we still choose to use strict reductions with this flag. We should instead 337 // use the default behaviour of vectorizing with unordered reductions if 338 // reordering is allowed. 339 cl::opt<bool> EnableStrictReductions( 340 "enable-strict-reductions", cl::init(false), cl::Hidden, 341 cl::desc("Enable the vectorisation of loops with in-order (strict) " 342 "FP reductions")); 343 344 static cl::opt<bool> PreferPredicatedReductionSelect( 345 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 346 cl::desc( 347 "Prefer predicating a reduction operation over an after loop select.")); 348 349 cl::opt<bool> EnableVPlanNativePath( 350 "enable-vplan-native-path", cl::init(false), cl::Hidden, 351 cl::desc("Enable VPlan-native vectorization path with " 352 "support for outer loop vectorization.")); 353 354 // FIXME: Remove this switch once we have divergence analysis. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type (e.g. x86_fp80, whose 80 bits of
/// data are commonly padded out to a 96- or 128-bit allocation).
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  ///   (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step.
  /// If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We have already proven that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile-guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};
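//
// As a purely illustrative sketch (not a precise CFG), when both stages of
// epilogue vectorization succeed the generated code contains three loops,
// reached through a series of checks with bypass edges into the later blocks:
//
//   minimum-iteration check for the main VF
//   SCEV / memory runtime checks
//   main vector loop        (MainLoopVF x MainLoopUF)
//   minimum-iteration check for the epilogue VF
//   epilogue vector loop    (EpilogueVF x EpilogueUF)
//   scalar remainder loop
//
// The fields above record the blocks holding these checks and the trip
// counts, so that the second stage can reuse what the first stage created.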
/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to set up the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF. For example, with Step == 2 and a
/// fixed VF of 4 this is the constant 8; with a scalable VF of (vscale x 4) it
/// is 8 * vscale.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Set up cost-based decisions for the user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form it takes after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is the ClassID of the target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is the ClassID of the target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group, but
    // assign the cost to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
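  /// For example, with a primary i64 induction variable %iv, the truncate
  ///   %t = trunc i64 %iv to i32
  /// is optimizable (when the truncate is not free): it can be removed in
  /// favor of a new i32 induction variable stepped in lockstep with %iv.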
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getLoadStoreType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
           (SI && TTI.isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      RecurrenceDescriptor RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
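  /// For example, a udiv whose divisor may be zero and that executes only
  /// under a condition in the original loop cannot be executed speculatively
  /// for inactive lanes, so it is scalarized and each lane is guarded by its
  /// predicate.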
  bool isScalarWithPredication(Instruction *I) const;

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue() const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. not prohibited by
  /// optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the loop tail.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an in-loop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an in-loop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate the cost of an intrinsic call instruction CI if it were
  /// vectorized with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
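  /// For example, at VF = 4 a call to llvm.fabs.f32 would be costed as the
  /// corresponding llvm.fabs.v4f32 intrinsic when the target supports it.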
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate the cost of a call instruction CI if it were vectorized with
  /// factor VF. Return the cost of the instruction, including scalarization
  /// overhead if it's needed. The flag NeedToScalarize shows if the call needs
  /// to be scalarized, i.e. either a vector version isn't available or it is
  /// too expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(ElementCount VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. A vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
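  /// For example, for a load of i32 costed at VF = 4, \p VectorTy would be set
  /// to <4 x i32>, the type the cost was computed for.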
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an in-loop reduction pattern, if \p I
  /// is part of that pattern.
  InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
                                          Type *VectorTy,
                                          TTI::TargetCostKind CostKind);

  /// Calculate the vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored ? 0 : extract of last
  /// element).
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or isn't divisible by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop, along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A map of in-loop reduction operations to their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// VPlan. This was added to allow quick lookup of the in-loop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps the cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
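  /// For example, if a value is computed in vector form but one of its users
  /// is scalarized, the lanes the user consumes must be extracted from the
  /// vector, and that extraction cost is part of the scalarization overhead.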
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hints.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};
} // end namespace llvm

/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better estimation of their cost. After deciding
/// to vectorize, the checks are moved back into the IR. If we decide not to
/// vectorize, the temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr, either no SCEV checks have been generated or they have been used.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr, either no memory runtime checks have been generated or
  /// they have been used.
  Instruction *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and added back during vector code generation. If
  /// there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVUnionPredicate &UnionPred) {

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
    if (!UnionPred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &UnionPred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      std::tie(std::ignore, MemRuntimeCheckCond) =
          addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                           RtPtrChecking.getChecks(), MemCheckExp);
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary blocks with the checks, update various places
    // accordingly.
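    // The check blocks are detached below: uses of them are redirected to the
    // preheader, they are terminated with 'unreachable', and they are erased
    // from LoopInfo and the DominatorTree until the checks are actually
    // emitted.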
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }

  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
    if (!SCEVCheckCond)
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
      // Memory runtime check generation creates compares that use expanded
      // values. Remove them before running the SCEVExpanderCleaners.
      for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
        if (MemCheckExp.isInsertedInstruction(&I))
          continue;
        SE.forgetValue(&I);
        SE.eraseValueFromMap(&I);
        I.eraseFromParent();
      }
    }
    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

    if (SCEVCheckCond)
      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
  }

  /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
                             BasicBlock *LoopVectorPreHeader,
                             BasicBlock *LoopExitBlock) {
    if (!SCEVCheckCond)
      return nullptr;
    if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
      if (C->isZero())
        return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();

    BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Create new preheader for vector loop.
    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    DT->addNewBlock(SCEVCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);

    ReplaceInstWithInst(
        SCEVCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
    // Mark the check as used, to prevent it from being removed during cleanup.
    SCEVCheckCond = nullptr;
    return SCEVCheckBlock;
  }

  /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
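  /// The resulting control flow is
  ///   Pred -> MemCheckBlock -> (Bypass | LoopVectorPreHeader),
  /// where MemCheckBlock branches to \p Bypass (the scalar loop) when the
  /// runtime conflict check fails.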
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
                                   BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether arrays
    // overlap.
    if (!MemRuntimeCheckCond)
      return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                MemCheckBlock);

    DT->addNewBlock(MemCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(MemCheckBlock, *LI);

    ReplaceInstWithInst(
        MemCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    // Mark the check as used, to prevent it from being removed during cleanup.
    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
  }
};

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
// vector length information is not provided, vectorization is not considered
// explicit. Interleave hints are not allowed either. These limitations will be
// relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
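  // For example, in a two-deep loop nest without annotations only the
  // innermost loop is collected; if the outer loop carries an explicit
  // vectorization hint and the VPlan-native path is enabled, the outer loop
  // is collected instead and the recursion stops there.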
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE, PSI).MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Value *Start,
    Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
    VPTransformState &State) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Type *StepType = Step->getType();
  if (Step->getType()->isFloatingPointTy())
    StepType = IntegerType::get(StepType->getContext(),
                                StepType->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
  if (Step->getType()->isFloatingPointTy())
    RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
  Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it
  //        doesn't handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    State.set(Def, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
                                          State, Part);

    LastInduction = cast<Instruction>(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
    unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
  // induction update chain itself.
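  // A Lane value of UINT_MAX (the default at the call sites above) means no
  // specific lane was requested, so the value is recorded for the whole
  // unroll part.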
  if (Lane < UINT_MAX)
    State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
  else
    State.set(CastDef, VectorLoopVal, Part);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
                                                TruncInst *Trunc, VPValue *Def,
                                                VPValue *CastDef,
                                                VPTransformState &State) {
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");

  auto II = Legal->getInductionVars().find(IV);
  assert(II != Legal->getInductionVars().end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  auto CreateStepValue = [&](const SCEV *Step) -> Value * {
    assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
           "Induction step should be loop invariant");
    if (PSE.getSE()->isSCEVable(IV->getType())) {
      SCEVExpander Exp(*PSE.getSE(), DL, "induction");
      return Exp.expandCodeFor(Step, Step->getType(),
                               LoopVectorPreHeader->getTerminator());
    }
    return cast<SCEVUnknown>(Step)->getValue();
  };

  // The scalar value to broadcast. This is derived from the canonical
  // induction variable. If a truncation type is given, truncate the canonical
  // induction variable and step. Otherwise, derive these values from the
  // induction descriptor.
  auto CreateScalarIV = [&](Value *&Step) -> Value * {
    Value *ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
    return ScalarIV;
  };

  // Create the vector values from the scalar IV, in the absence of creating a
  // vector IV.
  auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      Value *EntryPart =
          getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
                        ID.getInductionOpcode());
      State.set(Def, EntryPart, Part);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
      recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
                                            State, Part);
    }
  };

  // Fast-math-flags propagate from the original induction instruction.
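  // For example, a float induction defined by a 'fast' fadd in the scalar
  // loop keeps those flags on the widened step arithmetic created below.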
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with creating the step value.
  Value *Step = CreateStepValue(ID.getStep());
  if (VF.isZero() || VF.isScalar()) {
    Value *ScalarIV = CreateScalarIV(Step);
    CreateSplatIV(ScalarIV, Step);
    return;
  }

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = needsScalarInduction(EntryVal);
  if (!NeedsScalarIV) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    return;
  }

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (!shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    Value *ScalarIV = CreateScalarIV(Step);
    // Create scalar steps that can be used by instructions we will later
    // scalarize. Note that the addition of the scalar steps will not increase
    // the number of instructions in the loop in the common case prior to
    // InstCombine. We will be trading one vector extract for each scalar step.
    buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
    return;
  }

  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorised IV. Except when we tail-fold, then the splat IV feeds the
  // predicate used by the masked loads/stores.
  Value *ScalarIV = CreateScalarIV(Step);
  if (!Cost->isScalarEpilogueAllowed())
    CreateSplatIV(ScalarIV, Step);
  buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  VectorType *InitVecValVTy = ValVTy;
  Type *InitVecValSTy = STy;
  if (STy->isFloatingPointTy()) {
    InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Add on StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(
      VLen, ConstantInt::get(InitVecValSTy, StartIdx));
  InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);

  if (STy->isIntegerTy()) {
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
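    // E.g. with a fixed VF of 4, StartIdx 0 and Step 2, InitVec * Step is
    // <0, 2, 4, 6>, which is then added lane-wise to Val.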
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Instruction *EntryVal,
                                           const InductionDescriptor &ID,
                                           VPValue *Def, VPValue *CastDef,
                                           VPTransformState &State) {
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  bool IsUniform =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
  unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!IsUniform && VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, VF);
    UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
    SplatStep = Builder.CreateVectorSplat(VF, Step);
    SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
  }

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *StartIdx0 =
        createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);

    if (!IsUniform && VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(Def, Add, Part);
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements so we do those below. This improves the code quality when
      // trying to extract the first element, for example.
    }

    if (ScalarIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);

    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
      auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
      State.set(Def, Add, VPIteration(Part, Lane));
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part, Lane);
    }
  }
}

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst,
      Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
  State.set(Def, VectorValue, Instance.Part);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  return Builder.CreateVectorReverse(Vec, "reverse");
}

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ...           // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
do something to R, G, B
2672 //        Pic[i]   = R; // Member of index 0
2673 //        Pic[i+1] = G; // Member of index 1
2674 //        Pic[i+2] = B; // Member of index 2
2675 //   }
2676 // To:
2677 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2678 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2679 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2680 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2681 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2682 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2683     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2684     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2685     VPValue *BlockInMask) {
2686   Instruction *Instr = Group->getInsertPos();
2687   const DataLayout &DL = Instr->getModule()->getDataLayout();
2688
2689   // Prepare for the vector type of the interleaved load/store.
2690   Type *ScalarTy = getLoadStoreType(Instr);
2691   unsigned InterleaveFactor = Group->getFactor();
2692   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2693   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2694
2695   // Prepare for the new pointers.
2696   SmallVector<Value *, 2> AddrParts;
2697   unsigned Index = Group->getIndex(Instr);
2698
2699   // TODO: extend the masked interleaved-group support to reversed access.
2700   assert((!BlockInMask || !Group->isReverse()) &&
2701          "Reversed masked interleave-group not supported.");
2702
2703   // If the group is reverse, adjust the index to refer to the last vector lane
2704   // instead of the first. We adjust the index from the first vector lane,
2705   // rather than directly getting the pointer for lane VF - 1, because the
2706   // pointer operand of the interleaved access is supposed to be uniform. For
2707   // uniform instructions, we're only required to generate a value for the
2708   // first vector lane in each unroll iteration.
2709   if (Group->isReverse())
2710     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2711
2712   for (unsigned Part = 0; Part < UF; Part++) {
2713     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2714     setDebugLocFromInst(Builder, AddrPart);
2715
2716     // Note that the current instruction may belong to any index of the
2717     // group; the address needs to be adjusted to the member of index 0.
2718     //
2719     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2720     //       b = A[i];       // Member of index 0
2721     // The current pointer points to A[i+1]; adjust it to A[i].
2722     //
2723     // E.g.  A[i+1] = a;     // Member of index 1
2724     //       A[i]   = b;     // Member of index 0
2725     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2726     // The current pointer points to A[i+2]; adjust it to A[i].
2727
2728     bool InBounds = false;
2729     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2730       InBounds = gep->isInBounds();
2731     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2732     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2733
2734     // Cast to the vector pointer type.
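     // (Editorial illustration, not from the original source: assuming VF = 4
     // and an interleave factor of 3 over i32, the cast below turns an i32*
     // into a <12 x i32>*, so a single wide access covers 4 complete R,G,B
     // tuples.)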
2735     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2736     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2737     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2738   }
2739
2740   setDebugLocFromInst(Builder, Instr);
2741   Value *PoisonVec = PoisonValue::get(VecTy);
2742
2743   Value *MaskForGaps = nullptr;
2744   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2745     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2746     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2747   }
2748
2749   // Vectorize the interleaved load group.
2750   if (isa<LoadInst>(Instr)) {
2751     // For each unroll part, create a wide load for the group.
2752     SmallVector<Value *, 2> NewLoads;
2753     for (unsigned Part = 0; Part < UF; Part++) {
2754       Instruction *NewLoad;
2755       if (BlockInMask || MaskForGaps) {
2756         assert(useMaskedInterleavedAccesses(*TTI) &&
2757                "masked interleaved groups are not allowed.");
2758         Value *GroupMask = MaskForGaps;
2759         if (BlockInMask) {
2760           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2761           Value *ShuffledMask = Builder.CreateShuffleVector(
2762               BlockInMaskPart,
2763               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2764               "interleaved.mask");
2765           GroupMask = MaskForGaps
2766                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2767                                                 MaskForGaps)
2768                           : ShuffledMask;
2769         }
2770         NewLoad =
2771             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2772                                      GroupMask, PoisonVec, "wide.masked.vec");
2773       }
2774       else
2775         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2776                                             Group->getAlign(), "wide.vec");
2777       Group->addMetadata(NewLoad);
2778       NewLoads.push_back(NewLoad);
2779     }
2780
2781     // For each member in the group, shuffle out the appropriate data from the
2782     // wide loads.
2783     unsigned J = 0;
2784     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2785       Instruction *Member = Group->getMember(I);
2786
2787       // Skip the gaps in the group.
2788       if (!Member)
2789         continue;
2790
2791       auto StrideMask =
2792           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2793       for (unsigned Part = 0; Part < UF; Part++) {
2794         Value *StridedVec = Builder.CreateShuffleVector(
2795             NewLoads[Part], StrideMask, "strided.vec");
2796
2797         // If this member has a different type, cast the result to its type.
2798         if (Member->getType() != ScalarTy) {
2799           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2800           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2801           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2802         }
2803
2804         if (Group->isReverse())
2805           StridedVec = reverseVector(StridedVec);
2806
2807         State.set(VPDefs[J], StridedVec, Part);
2808       }
2809       ++J;
2810     }
2811     return;
2812   }
2813
2814   // The subvector type for the current instruction.
2815   auto *SubVT = VectorType::get(ScalarTy, VF);
2816
2817   // Vectorize the interleaved store group.
2818   for (unsigned Part = 0; Part < UF; Part++) {
2819     // Collect the stored vector from each member.
2820     SmallVector<Value *, 4> StoredVecs;
2821     for (unsigned i = 0; i < InterleaveFactor; i++) {
2822       // An interleaved store group doesn't allow gaps, so each index must
2822       // have a member.
2823       assert(Group->getMember(i) &&
2823              "Failed to get a member from an interleaved store group");
2824
2825       Value *StoredVec = State.get(StoredValues[i], Part);
2826
2827       if (Group->isReverse())
2828         StoredVec = reverseVector(StoredVec);
2829
2830       // If this member has a different type, cast it to the unified type.
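       // (Editorial illustration: assuming VF = 4 and a group of i32 members
       // in which one member stores float, that member's <4 x float> value
       // would be bitcast to <4 x i32> here before being interleaved.)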
2831 2832 if (StoredVec->getType() != SubVT) 2833 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2834 2835 StoredVecs.push_back(StoredVec); 2836 } 2837 2838 // Concatenate all vectors into a wide vector. 2839 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2840 2841 // Interleave the elements in the wide vector. 2842 Value *IVec = Builder.CreateShuffleVector( 2843 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2844 "interleaved.vec"); 2845 2846 Instruction *NewStoreInstr; 2847 if (BlockInMask) { 2848 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2849 Value *ShuffledMask = Builder.CreateShuffleVector( 2850 BlockInMaskPart, 2851 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2852 "interleaved.mask"); 2853 NewStoreInstr = Builder.CreateMaskedStore( 2854 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2855 } 2856 else 2857 NewStoreInstr = 2858 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2859 2860 Group->addMetadata(NewStoreInstr); 2861 } 2862 } 2863 2864 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2865 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2866 VPValue *StoredValue, VPValue *BlockInMask) { 2867 // Attempt to issue a wide load. 2868 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2869 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2870 2871 assert((LI || SI) && "Invalid Load/Store instruction"); 2872 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2873 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2874 2875 LoopVectorizationCostModel::InstWidening Decision = 2876 Cost->getWideningDecision(Instr, VF); 2877 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2878 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2879 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2880 "CM decision is not to widen the memory instruction"); 2881 2882 Type *ScalarDataTy = getLoadStoreType(Instr); 2883 2884 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2885 const Align Alignment = getLoadStoreAlignment(Instr); 2886 2887 // Determine if the pointer operand of the access is either consecutive or 2888 // reverse consecutive. 2889 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2890 bool ConsecutiveStride = 2891 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2892 bool CreateGatherScatter = 2893 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2894 2895 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2896 // gather/scatter. Otherwise Decision should have been to Scalarize. 2897 assert((ConsecutiveStride || CreateGatherScatter) && 2898 "The instruction should be scalarized"); 2899 (void)ConsecutiveStride; 2900 2901 VectorParts BlockInMaskParts(UF); 2902 bool isMaskRequired = BlockInMask; 2903 if (isMaskRequired) 2904 for (unsigned Part = 0; Part < UF; ++Part) 2905 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2906 2907 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2908 // Calculate the pointer for the specific unroll-part. 2909 GetElementPtrInst *PartPtr = nullptr; 2910 2911 bool InBounds = false; 2912 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2913 InBounds = gep->isInBounds(); 2914 if (Reverse) { 2915 // If the address is consecutive but reversed, then the 2916 // wide store needs to start at the last vector element. 
2917 // RunTimeVF = VScale * VF.getKnownMinValue() 2918 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2919 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2920 // NumElt = -Part * RunTimeVF 2921 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2922 // LastLane = 1 - RunTimeVF 2923 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2924 PartPtr = 2925 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2926 PartPtr->setIsInBounds(InBounds); 2927 PartPtr = cast<GetElementPtrInst>( 2928 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2929 PartPtr->setIsInBounds(InBounds); 2930 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2931 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2932 } else { 2933 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2934 PartPtr = cast<GetElementPtrInst>( 2935 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2936 PartPtr->setIsInBounds(InBounds); 2937 } 2938 2939 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2940 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2941 }; 2942 2943 // Handle Stores: 2944 if (SI) { 2945 setDebugLocFromInst(Builder, SI); 2946 2947 for (unsigned Part = 0; Part < UF; ++Part) { 2948 Instruction *NewSI = nullptr; 2949 Value *StoredVal = State.get(StoredValue, Part); 2950 if (CreateGatherScatter) { 2951 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2952 Value *VectorGep = State.get(Addr, Part); 2953 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2954 MaskPart); 2955 } else { 2956 if (Reverse) { 2957 // If we store to reverse consecutive memory locations, then we need 2958 // to reverse the order of elements in the stored value. 2959 StoredVal = reverseVector(StoredVal); 2960 // We don't want to update the value in the map as it might be used in 2961 // another expression. So don't call resetVectorValue(StoredVal). 2962 } 2963 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2964 if (isMaskRequired) 2965 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2966 BlockInMaskParts[Part]); 2967 else 2968 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2969 } 2970 addMetadata(NewSI, SI); 2971 } 2972 return; 2973 } 2974 2975 // Handle loads. 2976 assert(LI && "Must have a load instruction"); 2977 setDebugLocFromInst(Builder, LI); 2978 for (unsigned Part = 0; Part < UF; ++Part) { 2979 Value *NewLI; 2980 if (CreateGatherScatter) { 2981 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2982 Value *VectorGep = State.get(Addr, Part); 2983 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2984 nullptr, "wide.masked.gather"); 2985 addMetadata(NewLI, LI); 2986 } else { 2987 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2988 if (isMaskRequired) 2989 NewLI = Builder.CreateMaskedLoad( 2990 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), 2991 "wide.masked.load"); 2992 else 2993 NewLI = 2994 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2995 2996 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
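      // (Editorial illustration: for a reverse access with VF = 4, the loaded
      // value is reversed below with the equivalent of a <3, 2, 1, 0> shuffle
      // before being recorded in State.)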
2997       addMetadata(NewLI, LI);
2998       if (Reverse)
2999         NewLI = reverseVector(NewLI);
3000     }
3001
3002     State.set(Def, NewLI, Part);
3003   }
3004 }
3005
3006 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3007                                                VPUser &User,
3008                                                const VPIteration &Instance,
3009                                                bool IfPredicateInstr,
3010                                                VPTransformState &State) {
3011   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3012
3013   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3014   // the first lane and part.
3015   if (isa<NoAliasScopeDeclInst>(Instr))
3016     if (!Instance.isFirstIteration())
3017       return;
3018
3019   setDebugLocFromInst(Builder, Instr);
3020
3021   // Does this instruction return a value?
3022   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3023
3024   Instruction *Cloned = Instr->clone();
3025   if (!IsVoidRetTy)
3026     Cloned->setName(Instr->getName() + ".cloned");
3027
3028   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3029                                Builder.GetInsertPoint());
3030   // Replace the operands of the cloned instruction with their scalar
3031   // equivalents in the new loop.
3032   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3033     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3034     auto InputInstance = Instance;
3035     if (!Operand || !OrigLoop->contains(Operand) ||
3036         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3037       InputInstance.Lane = VPLane::getFirstLane();
3038     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3039     Cloned->setOperand(op, NewOp);
3040   }
3041   addNewMetadata(Cloned, Instr);
3042
3043   // Place the cloned scalar in the new loop.
3044   Builder.Insert(Cloned);
3045
3046   State.set(Def, Cloned, Instance);
3047
3048   // If we just cloned a new assumption, add it to the assumption cache.
3049   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3050     AC->registerAssumption(II);
3051
3052   // End if-block.
3053   if (IfPredicateInstr)
3054     PredicatedInstructions.push_back(Cloned);
3055 }
3056
3057 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3058                                                       Value *End, Value *Step,
3059                                                       Instruction *DL) {
3060   BasicBlock *Header = L->getHeader();
3061   BasicBlock *Latch = L->getLoopLatch();
3062   // As we're just creating this loop, it's possible no latch exists
3063   // yet. If so, use the header as this will be a single-block loop.
3064   if (!Latch)
3065     Latch = Header;
3066
3067   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3068   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3069   setDebugLocFromInst(Builder, OldInst);
3070   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3071
3072   Builder.SetInsertPoint(Latch->getTerminator());
3073   setDebugLocFromInst(Builder, OldInst);
3074
3075   // Create i+1 and fill the PHINode.
3076   //
3077   // If the tail is not folded, we know that End - Start >= Step (either
3078   // statically or through the minimum iteration checks). We also know that both
3079   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3080   // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
3081   // overflows and we can mark the induction increment as NUW.
3082   Value *Next =
3083       Builder.CreateAdd(Induction, Step, "index.next",
3084                         /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3085   Induction->addIncoming(Start, L->getLoopPreheader());
3086   Induction->addIncoming(Next, Latch);
3087   // Create the compare.
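  // (Editorial illustration of the shorthand IR created here, assuming the
  // vector loop's unique exit is the middle block:
  //   %cmp = icmp eq i64 %index.next, %n.vec
  //   br i1 %cmp, label %middle.block, label %vector.body)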
3088 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3089 Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3090 3091 // Now we have two terminators. Remove the old one from the block. 3092 Latch->getTerminator()->eraseFromParent(); 3093 3094 return Induction; 3095 } 3096 3097 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3098 if (TripCount) 3099 return TripCount; 3100 3101 assert(L && "Create Trip Count for null loop."); 3102 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3103 // Find the loop boundaries. 3104 ScalarEvolution *SE = PSE.getSE(); 3105 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3106 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3107 "Invalid loop count"); 3108 3109 Type *IdxTy = Legal->getWidestInductionType(); 3110 assert(IdxTy && "No type for induction"); 3111 3112 // The exit count might have the type of i64 while the phi is i32. This can 3113 // happen if we have an induction variable that is sign extended before the 3114 // compare. The only way that we get a backedge taken count is that the 3115 // induction variable was signed and as such will not overflow. In such a case 3116 // truncation is legal. 3117 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3118 IdxTy->getPrimitiveSizeInBits()) 3119 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3120 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3121 3122 // Get the total trip count from the count by adding 1. 3123 const SCEV *ExitCount = SE->getAddExpr( 3124 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3125 3126 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3127 3128 // Expand the trip count and place the new instructions in the preheader. 3129 // Notice that the pre-header does not change, only the loop body. 3130 SCEVExpander Exp(*SE, DL, "induction"); 3131 3132 // Count holds the overall loop count (N). 3133 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3134 L->getLoopPreheader()->getTerminator()); 3135 3136 if (TripCount->getType()->isPointerTy()) 3137 TripCount = 3138 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3139 L->getLoopPreheader()->getTerminator()); 3140 3141 return TripCount; 3142 } 3143 3144 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3145 if (VectorTripCount) 3146 return VectorTripCount; 3147 3148 Value *TC = getOrCreateTripCount(L); 3149 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3150 3151 Type *Ty = TC->getType(); 3152 // This is where we can make the step a runtime constant. 3153 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3154 3155 // If the tail is to be folded by masking, round the number of iterations N 3156 // up to a multiple of Step instead of rounding down. This is done by first 3157 // adding Step-1 and then rounding down. Note that it's ok if this addition 3158 // overflows: the vector induction variable will eventually wrap to zero given 3159 // that it starts at zero and its Step is a power of two; the loop will then 3160 // exit, with the last early-exit vector comparison also producing all-true. 
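  // (Editorial worked example with assumed values: for VF = 4, UF = 2, and
  // N = 10, tail folding computes TC = 10 + 7 = 17; below, n.mod.vf =
  // 17 % 8 = 1 and n.vec = 16, and the loop mask disables the 6 lanes past
  // the original trip count in the final vector iteration.)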
3161 if (Cost->foldTailByMasking()) { 3162 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3163 "VF*UF must be a power of 2 when folding tail by masking"); 3164 assert(!VF.isScalable() && 3165 "Tail folding not yet supported for scalable vectors"); 3166 TC = Builder.CreateAdd( 3167 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3168 } 3169 3170 // Now we need to generate the expression for the part of the loop that the 3171 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3172 // iterations are not required for correctness, or N - Step, otherwise. Step 3173 // is equal to the vectorization factor (number of SIMD elements) times the 3174 // unroll factor (number of SIMD instructions). 3175 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3176 3177 // There are two cases where we need to ensure (at least) the last iteration 3178 // runs in the scalar remainder loop. Thus, if the step evenly divides 3179 // the trip count, we set the remainder to be equal to the step. If the step 3180 // does not evenly divide the trip count, no adjustment is necessary since 3181 // there will already be scalar iterations. Note that the minimum iterations 3182 // check ensures that N >= Step. The cases are: 3183 // 1) If there is a non-reversed interleaved group that may speculatively 3184 // access memory out-of-bounds. 3185 // 2) If any instruction may follow a conditionally taken exit. That is, if 3186 // the loop contains multiple exiting blocks, or a single exiting block 3187 // which is not the latch. 3188 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 3189 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3190 R = Builder.CreateSelect(IsZero, Step, R); 3191 } 3192 3193 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3194 3195 return VectorTripCount; 3196 } 3197 3198 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3199 const DataLayout &DL) { 3200 // Verify that V is a vector type with same number of elements as DstVTy. 3201 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3202 unsigned VF = DstFVTy->getNumElements(); 3203 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3204 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3205 Type *SrcElemTy = SrcVecTy->getElementType(); 3206 Type *DstElemTy = DstFVTy->getElementType(); 3207 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3208 "Vector elements must have same size"); 3209 3210 // Do a direct cast if element types are castable. 3211 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3212 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3213 } 3214 // V cannot be directly casted to desired vector type. 3215 // May happen when V is a floating point vector but DstVTy is a vector of 3216 // pointers or vice-versa. Handle this using a two-step bitcast using an 3217 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
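  // (Editorial illustration: on a target with 64-bit pointers, a <2 x double>
  // value would first be bitcast to <2 x i64> and then to the destination
  // pointer-vector type, since an FP <-> pointer bitcast is not directly
  // legal IR.)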
3218 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3219 "Only one type should be a pointer type"); 3220 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3221 "Only one type should be a floating point type"); 3222 Type *IntTy = 3223 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3224 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3225 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3226 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3227 } 3228 3229 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3230 BasicBlock *Bypass) { 3231 Value *Count = getOrCreateTripCount(L); 3232 // Reuse existing vector loop preheader for TC checks. 3233 // Note that new preheader block is generated for vector loop. 3234 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3235 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3236 3237 // Generate code to check if the loop's trip count is less than VF * UF, or 3238 // equal to it in case a scalar epilogue is required; this implies that the 3239 // vector trip count is zero. This check also covers the case where adding one 3240 // to the backedge-taken count overflowed leading to an incorrect trip count 3241 // of zero. In this case we will also jump to the scalar loop. 3242 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3243 : ICmpInst::ICMP_ULT; 3244 3245 // If tail is to be folded, vector loop takes care of all iterations. 3246 Value *CheckMinIters = Builder.getFalse(); 3247 if (!Cost->foldTailByMasking()) { 3248 Value *Step = 3249 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3250 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3251 } 3252 // Create new preheader for vector loop. 3253 LoopVectorPreHeader = 3254 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3255 "vector.ph"); 3256 3257 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3258 DT->getNode(Bypass)->getIDom()) && 3259 "TC check is expected to dominate Bypass"); 3260 3261 // Update dominator for Bypass & LoopExit. 3262 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3263 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3264 3265 ReplaceInstWithInst( 3266 TCCheckBlock->getTerminator(), 3267 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3268 LoopBypassBlocks.push_back(TCCheckBlock); 3269 } 3270 3271 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3272 3273 BasicBlock *const SCEVCheckBlock = 3274 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3275 if (!SCEVCheckBlock) 3276 return nullptr; 3277 3278 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3279 (OptForSizeBasedOnProfile && 3280 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3281 "Cannot SCEV check stride or overflow when optimizing for size"); 3282 3283 3284 // Update dominator only if this is first RT check. 3285 if (LoopBypassBlocks.empty()) { 3286 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3287 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3288 } 3289 3290 LoopBypassBlocks.push_back(SCEVCheckBlock); 3291 AddedSafetyChecks = true; 3292 return SCEVCheckBlock; 3293 } 3294 3295 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3296 BasicBlock *Bypass) { 3297 // VPlan-native path does not do any analysis for runtime checks currently. 
3298   if (EnableVPlanNativePath)
3299     return nullptr;
3300
3301   BasicBlock *const MemCheckBlock =
3302       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3303
3304   // Check if we generated code that checks at runtime whether the arrays
3305   // overlap. We put the checks into a separate block to make the more common
3306   // case of few elements faster.
3307   if (!MemCheckBlock)
3308     return nullptr;
3309
3310   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3311     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3312            "Cannot emit memory checks when optimizing for size, unless forced "
3313            "to vectorize.");
3314     ORE->emit([&]() {
3315       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3316                                         L->getStartLoc(), L->getHeader())
3317              << "Code-size may be reduced by not forcing "
3318                 "vectorization, or by source-code modifications "
3319                 "eliminating the need for runtime checks "
3320                 "(e.g., adding 'restrict').";
3321     });
3322   }
3323
3324   LoopBypassBlocks.push_back(MemCheckBlock);
3325
3326   AddedSafetyChecks = true;
3327
3328   // We currently don't use LoopVersioning for the actual loop cloning but we
3329   // still use it to add the noalias metadata.
3330   LVer = std::make_unique<LoopVersioning>(
3331       *Legal->getLAI(),
3332       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3333       DT, PSE.getSE());
3334   LVer->prepareNoAliasMetadata();
3335   return MemCheckBlock;
3336 }
3337
3338 Value *InnerLoopVectorizer::emitTransformedIndex(
3339     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3340     const InductionDescriptor &ID) const {
3341
3342   SCEVExpander Exp(*SE, DL, "induction");
3343   auto Step = ID.getStep();
3344   auto StartValue = ID.getStartValue();
3345   assert(Index->getType()->getScalarType() == Step->getType() &&
3346          "Index scalar type does not match StepValue type");
3347
3348   // Note: the IR at this point is broken. We cannot use SE to create a new
3349   // SCEV and then expand it, hoping that SCEV's simplification will give us
3350   // more optimal code. Unfortunately, attempting to do so on invalid IR may
3351   // lead to various SCEV crashes. So all we can do is use the builder and rely
3352   // on InstCombine for future simplifications. Here we handle some trivial
3353   // cases only.
3354   auto CreateAdd = [&B](Value *X, Value *Y) {
3355     assert(X->getType() == Y->getType() && "Types don't match!");
3356     if (auto *CX = dyn_cast<ConstantInt>(X))
3357       if (CX->isZero())
3358         return Y;
3359     if (auto *CY = dyn_cast<ConstantInt>(Y))
3360       if (CY->isZero())
3361         return X;
3362     return B.CreateAdd(X, Y);
3363   };
3364
3365   // We allow X to be a vector type, in which case Y will potentially be
3366   // splatted into a vector with the same element count.
3367   auto CreateMul = [&B](Value *X, Value *Y) {
3368     assert(X->getType()->getScalarType() == Y->getType() &&
3369            "Types don't match!");
3370     if (auto *CX = dyn_cast<ConstantInt>(X))
3371       if (CX->isOne())
3372         return Y;
3373     if (auto *CY = dyn_cast<ConstantInt>(Y))
3374       if (CY->isOne())
3375         return X;
3376     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3377     if (XVTy && !isa<VectorType>(Y->getType()))
3378       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3379     return B.CreateMul(X, Y);
3380   };
3381
3382   // Get a suitable insert point for SCEV expansion.
For blocks in the vector 3383 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3384 // the DomTree is not kept up-to-date for additional blocks generated in the 3385 // vector loop. By using the header as insertion point, we guarantee that the 3386 // expanded instructions dominate all their uses. 3387 auto GetInsertPoint = [this, &B]() { 3388 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3389 if (InsertBB != LoopVectorBody && 3390 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3391 return LoopVectorBody->getTerminator(); 3392 return &*B.GetInsertPoint(); 3393 }; 3394 3395 switch (ID.getKind()) { 3396 case InductionDescriptor::IK_IntInduction: { 3397 assert(!isa<VectorType>(Index->getType()) && 3398 "Vector indices not supported for integer inductions yet"); 3399 assert(Index->getType() == StartValue->getType() && 3400 "Index type does not match StartValue type"); 3401 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3402 return B.CreateSub(StartValue, Index); 3403 auto *Offset = CreateMul( 3404 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3405 return CreateAdd(StartValue, Offset); 3406 } 3407 case InductionDescriptor::IK_PtrInduction: { 3408 assert(isa<SCEVConstant>(Step) && 3409 "Expected constant step for pointer induction"); 3410 return B.CreateGEP( 3411 StartValue->getType()->getPointerElementType(), StartValue, 3412 CreateMul(Index, 3413 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3414 GetInsertPoint()))); 3415 } 3416 case InductionDescriptor::IK_FpInduction: { 3417 assert(!isa<VectorType>(Index->getType()) && 3418 "Vector indices not supported for FP inductions yet"); 3419 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3420 auto InductionBinOp = ID.getInductionBinOp(); 3421 assert(InductionBinOp && 3422 (InductionBinOp->getOpcode() == Instruction::FAdd || 3423 InductionBinOp->getOpcode() == Instruction::FSub) && 3424 "Original bin op should be defined for FP induction"); 3425 3426 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3427 Value *MulExp = B.CreateFMul(StepValue, Index); 3428 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3429 "induction"); 3430 } 3431 case InductionDescriptor::IK_NoInduction: 3432 return nullptr; 3433 } 3434 llvm_unreachable("invalid enum"); 3435 } 3436 3437 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3438 LoopScalarBody = OrigLoop->getHeader(); 3439 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3440 LoopExitBlock = OrigLoop->getUniqueExitBlock(); 3441 assert(LoopExitBlock && "Must have an exit block"); 3442 assert(LoopVectorPreHeader && "Invalid loop structure"); 3443 3444 LoopMiddleBlock = 3445 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3446 LI, nullptr, Twine(Prefix) + "middle.block"); 3447 LoopScalarPreHeader = 3448 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3449 nullptr, Twine(Prefix) + "scalar.ph"); 3450 3451 // Set up branch from middle block to the exit and scalar preheader blocks. 3452 // completeLoopSkeleton will update the condition to use an iteration check, 3453 // if required to decide whether to execute the remainder. 
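  // (Editorial illustration: the branch created below is initially
  //   br i1 true, label %exit, label %scalar.ph
  // and completeLoopSkeleton later replaces the constant condition with the
  // cmp.n iteration check when a scalar remainder may be needed.)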
3454 BranchInst *BrInst = 3455 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue()); 3456 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3457 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3458 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3459 3460 // We intentionally don't let SplitBlock to update LoopInfo since 3461 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3462 // LoopVectorBody is explicitly added to the correct place few lines later. 3463 LoopVectorBody = 3464 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3465 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3466 3467 // Update dominator for loop exit. 3468 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3469 3470 // Create and register the new vector loop. 3471 Loop *Lp = LI->AllocateLoop(); 3472 Loop *ParentLoop = OrigLoop->getParentLoop(); 3473 3474 // Insert the new loop into the loop nest and register the new basic blocks 3475 // before calling any utilities such as SCEV that require valid LoopInfo. 3476 if (ParentLoop) { 3477 ParentLoop->addChildLoop(Lp); 3478 } else { 3479 LI->addTopLevelLoop(Lp); 3480 } 3481 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3482 return Lp; 3483 } 3484 3485 void InnerLoopVectorizer::createInductionResumeValues( 3486 Loop *L, Value *VectorTripCount, 3487 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3488 assert(VectorTripCount && L && "Expected valid arguments"); 3489 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3490 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3491 "Inconsistent information about additional bypass."); 3492 // We are going to resume the execution of the scalar loop. 3493 // Go over all of the induction variables that we found and fix the 3494 // PHIs that are left in the scalar version of the loop. 3495 // The starting values of PHI nodes depend on the counter of the last 3496 // iteration in the vectorized loop. 3497 // If we come from a bypass edge then we need to start from the original 3498 // start value. 3499 for (auto &InductionEntry : Legal->getInductionVars()) { 3500 PHINode *OrigPhi = InductionEntry.first; 3501 InductionDescriptor II = InductionEntry.second; 3502 3503 // Create phi nodes to merge from the backedge-taken check block. 3504 PHINode *BCResumeVal = 3505 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3506 LoopScalarPreHeader->getTerminator()); 3507 // Copy original phi DL over to the new one. 3508 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3509 Value *&EndValue = IVEndValues[OrigPhi]; 3510 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3511 if (OrigPhi == OldInduction) { 3512 // We know what the end value is. 3513 EndValue = VectorTripCount; 3514 } else { 3515 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3516 3517 // Fast-math-flags propagate from the original induction instruction. 
3518 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3519 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3520 3521 Type *StepType = II.getStep()->getType(); 3522 Instruction::CastOps CastOp = 3523 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3524 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3525 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3526 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3527 EndValue->setName("ind.end"); 3528 3529 // Compute the end value for the additional bypass (if applicable). 3530 if (AdditionalBypass.first) { 3531 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3532 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3533 StepType, true); 3534 CRD = 3535 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3536 EndValueFromAdditionalBypass = 3537 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3538 EndValueFromAdditionalBypass->setName("ind.end"); 3539 } 3540 } 3541 // The new PHI merges the original incoming value, in case of a bypass, 3542 // or the value at the end of the vectorized loop. 3543 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3544 3545 // Fix the scalar body counter (PHI node). 3546 // The old induction's phi node in the scalar body needs the truncated 3547 // value. 3548 for (BasicBlock *BB : LoopBypassBlocks) 3549 BCResumeVal->addIncoming(II.getStartValue(), BB); 3550 3551 if (AdditionalBypass.first) 3552 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3553 EndValueFromAdditionalBypass); 3554 3555 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3556 } 3557 } 3558 3559 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3560 MDNode *OrigLoopID) { 3561 assert(L && "Expected valid loop."); 3562 3563 // The trip counts should be cached by now. 3564 Value *Count = getOrCreateTripCount(L); 3565 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3566 3567 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3568 3569 // Add a check in the middle block to see if we have completed 3570 // all of the iterations in the first vector loop. 3571 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3572 // If tail is to be folded, we know we don't need to run the remainder. 3573 if (!Cost->foldTailByMasking()) { 3574 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3575 Count, VectorTripCount, "cmp.n", 3576 LoopMiddleBlock->getTerminator()); 3577 3578 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3579 // of the corresponding compare because they may have ended up with 3580 // different line numbers and we want to avoid awkward line stepping while 3581 // debugging. Eg. if the compare has got a line number inside the loop. 3582 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3583 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3584 } 3585 3586 // Get ready to start creating new instructions into the vectorized body. 
3587 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3588 "Inconsistent vector loop preheader"); 3589 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3590 3591 Optional<MDNode *> VectorizedLoopID = 3592 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3593 LLVMLoopVectorizeFollowupVectorized}); 3594 if (VectorizedLoopID.hasValue()) { 3595 L->setLoopID(VectorizedLoopID.getValue()); 3596 3597 // Do not setAlreadyVectorized if loop attributes have been defined 3598 // explicitly. 3599 return LoopVectorPreHeader; 3600 } 3601 3602 // Keep all loop hints from the original loop on the vector loop (we'll 3603 // replace the vectorizer-specific hints below). 3604 if (MDNode *LID = OrigLoop->getLoopID()) 3605 L->setLoopID(LID); 3606 3607 LoopVectorizeHints Hints(L, true, *ORE); 3608 Hints.setAlreadyVectorized(); 3609 3610 #ifdef EXPENSIVE_CHECKS 3611 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3612 LI->verify(*DT); 3613 #endif 3614 3615 return LoopVectorPreHeader; 3616 } 3617 3618 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3619 /* 3620 In this function we generate a new loop. The new loop will contain 3621 the vectorized instructions while the old loop will continue to run the 3622 scalar remainder. 3623 3624 [ ] <-- loop iteration number check. 3625 / | 3626 / v 3627 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3628 | / | 3629 | / v 3630 || [ ] <-- vector pre header. 3631 |/ | 3632 | v 3633 | [ ] \ 3634 | [ ]_| <-- vector loop. 3635 | | 3636 | v 3637 | -[ ] <--- middle-block. 3638 | / | 3639 | / v 3640 -|- >[ ] <--- new preheader. 3641 | | 3642 | v 3643 | [ ] \ 3644 | [ ]_| <-- old scalar loop to handle remainder. 3645 \ | 3646 \ v 3647 >[ ] <-- exit block. 3648 ... 3649 */ 3650 3651 // Get the metadata of the original loop before it gets modified. 3652 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3653 3654 // Workaround! Compute the trip count of the original loop and cache it 3655 // before we start modifying the CFG. This code has a systemic problem 3656 // wherein it tries to run analysis over partially constructed IR; this is 3657 // wrong, and not simply for SCEV. The trip count of the original loop 3658 // simply happens to be prone to hitting this in practice. In theory, we 3659 // can hit the same issue for any SCEV, or ValueTracking query done during 3660 // mutation. See PR49900. 3661 getOrCreateTripCount(OrigLoop); 3662 3663 // Create an empty vector loop, and prepare basic blocks for the runtime 3664 // checks. 3665 Loop *Lp = createVectorLoopSkeleton(""); 3666 3667 // Now, compare the new count to zero. If it is zero skip the vector loop and 3668 // jump to the scalar loop. This check also covers the case where the 3669 // backedge-taken count is uint##_max: adding one to it will overflow leading 3670 // to an incorrect trip count of zero. In this (rare) case we will also jump 3671 // to the scalar loop. 3672 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3673 3674 // Generate the code to check any assumptions that we've made for SCEV 3675 // expressions. 3676 emitSCEVChecks(Lp, LoopScalarPreHeader); 3677 3678 // Generate the code that checks in runtime if arrays overlap. We put the 3679 // checks into a separate block to make the more common case of few elements 3680 // faster. 3681 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3682 3683 // Some loops have a single integer induction variable, while other loops 3684 // don't. 
One example is C++ iterators, which often have multiple pointer
3685 // induction variables. In the code below we also support a case where we
3686 // don't have a single induction variable.
3687 //
3688 // We try as hard as possible to obtain an induction variable from the
3689 // original loop. However, if we don't find one that:
3690 //   - is an integer
3691 //   - counts from zero, stepping by one
3692 //   - is the size of the widest induction variable type
3693 // then we create a new one.
3694   OldInduction = Legal->getPrimaryInduction();
3695   Type *IdxTy = Legal->getWidestInductionType();
3696   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3697   // The loop step is equal to the vectorization factor (num of SIMD elements)
3698   // times the unroll factor (num of SIMD instructions).
3699   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3700   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3701   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3702   Induction =
3703       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3704                               getDebugLocFromInstOrOperands(OldInduction));
3705
3706   // Emit phis for the new starting index of the scalar loop.
3707   createInductionResumeValues(Lp, CountRoundDown);
3708
3709   return completeLoopSkeleton(Lp, OrigLoopID);
3710 }
3711
3712 // Fix up external users of the induction variable. At this point, we are
3713 // in LCSSA form, with all external PHIs that use the IV having one input value,
3714 // coming from the remainder loop. We need those PHIs to also have a correct
3715 // value for the IV when arriving directly from the middle block.
3716 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3717                                        const InductionDescriptor &II,
3718                                        Value *CountRoundDown, Value *EndValue,
3719                                        BasicBlock *MiddleBlock) {
3720   // There are two kinds of external IV usages - those that use the value
3721   // computed in the last iteration (the PHI) and those that use the penultimate
3722   // value (the value that feeds into the phi from the loop latch).
3723   // We allow both, but they, obviously, have different values.
3724
3725   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3726
3727   DenseMap<Value *, Value *> MissingVals;
3728
3729   // An external user of the last iteration's value should see the value that
3730   // the remainder loop uses to initialize its own IV.
3731   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3732   for (User *U : PostInc->users()) {
3733     Instruction *UI = cast<Instruction>(U);
3734     if (!OrigLoop->contains(UI)) {
3735       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3736       MissingVals[UI] = EndValue;
3737     }
3738   }
3739
3740   // An external user of the penultimate value needs to see EndValue - Step.
3741   // The simplest way to get this is to recompute it from the constituent SCEVs,
3742   // that is Start + (Step * (CRD - 1)).
3743   for (User *U : OrigPhi->users()) {
3744     auto *UI = cast<Instruction>(U);
3745     if (!OrigLoop->contains(UI)) {
3746       const DataLayout &DL =
3747           OrigLoop->getHeader()->getModule()->getDataLayout();
3748       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3749
3750       IRBuilder<> B(MiddleBlock->getTerminator());
3751
3752       // Fast-math-flags propagate from the original induction instruction.
3753       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3754         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3755
3756       Value *CountMinusOne = B.CreateSub(
3757           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3758       Value *CMO =
3759           !II.getStep()->getType()->isIntegerTy()
3760               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3761                              II.getStep()->getType())
3762               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3763       CMO->setName("cast.cmo");
3764       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3765       Escape->setName("ind.escape");
3766       MissingVals[UI] = Escape;
3767     }
3768   }
3769
3770   for (auto &I : MissingVals) {
3771     PHINode *PHI = cast<PHINode>(I.first);
3772     // One corner case we have to handle is two IVs "chasing" each other,
3773     // that is %IV2 = phi [...], [ %IV1, %latch ]
3774     // In this case, if IV1 has an external use, we need to avoid adding both
3775     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3776     // don't already have an incoming value for the middle block.
3777     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3778       PHI->addIncoming(I.second, MiddleBlock);
3779   }
3780 }
3781
3782 namespace {
3783
3784 struct CSEDenseMapInfo {
3785   static bool canHandle(const Instruction *I) {
3786     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3787            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3788   }
3789
3790   static inline Instruction *getEmptyKey() {
3791     return DenseMapInfo<Instruction *>::getEmptyKey();
3792   }
3793
3794   static inline Instruction *getTombstoneKey() {
3795     return DenseMapInfo<Instruction *>::getTombstoneKey();
3796   }
3797
3798   static unsigned getHashValue(const Instruction *I) {
3799     assert(canHandle(I) && "Unknown instruction!");
3800     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3801                                                            I->value_op_end()));
3802   }
3803
3804   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3805     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3806         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3807       return LHS == RHS;
3808     return LHS->isIdenticalTo(RHS);
3809   }
3810 };
3811
3812 } // end anonymous namespace
3813
3814 /// Perform CSE of induction variable instructions.
3815 static void cse(BasicBlock *BB) {
3816   // Perform simple CSE.
3817   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3818   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3819     Instruction *In = &*I++;
3820
3821     if (!CSEDenseMapInfo::canHandle(In))
3822       continue;
3823
3824     // Check if we can replace this instruction with any of the
3825     // visited instructions.
3826     if (Instruction *V = CSEMap.lookup(In)) {
3827       In->replaceAllUsesWith(V);
3828       In->eraseFromParent();
3829       continue;
3830     }
3831
3832     CSEMap[In] = In;
3833   }
3834 }
3835
3836 InstructionCost
3837 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3838                                               bool &NeedToScalarize) const {
3839   Function *F = CI->getCalledFunction();
3840   Type *ScalarRetTy = CI->getType();
3841   SmallVector<Type *, 4> Tys, ScalarTys;
3842   for (auto &ArgOp : CI->arg_operands())
3843     ScalarTys.push_back(ArgOp->getType());
3844
3845   // Estimate the cost of a scalarized vector call. The source operands are
3846   // assumed to be vectors, so we need to extract individual elements from
3847   // there, execute VF scalar calls, and then gather the result into the
3848   // vector return value.
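  // (Editorial worked example with assumed numbers: at VF = 4, a call with
  // two vector operands is modeled as 4 scalar calls plus the scalarization
  // overhead of 8 extractelements for the operands and 4 insertelements to
  // rebuild the vector result.)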
3849 InstructionCost ScalarCallCost = 3850 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3851 if (VF.isScalar()) 3852 return ScalarCallCost; 3853 3854 // Compute corresponding vector type for return value and arguments. 3855 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3856 for (Type *ScalarTy : ScalarTys) 3857 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3858 3859 // Compute costs of unpacking argument values for the scalar calls and 3860 // packing the return values to a vector. 3861 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3862 3863 InstructionCost Cost = 3864 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3865 3866 // If we can't emit a vector call for this function, then the currently found 3867 // cost is the cost we need to return. 3868 NeedToScalarize = true; 3869 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3870 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3871 3872 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3873 return Cost; 3874 3875 // If the corresponding vector cost is cheaper, return its cost. 3876 InstructionCost VectorCallCost = 3877 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3878 if (VectorCallCost < Cost) { 3879 NeedToScalarize = false; 3880 Cost = VectorCallCost; 3881 } 3882 return Cost; 3883 } 3884 3885 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3886 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3887 return Elt; 3888 return VectorType::get(Elt, VF); 3889 } 3890 3891 InstructionCost 3892 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3893 ElementCount VF) const { 3894 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3895 assert(ID && "Expected intrinsic call!"); 3896 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3897 FastMathFlags FMF; 3898 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3899 FMF = FPMO->getFastMathFlags(); 3900 3901 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); 3902 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3903 SmallVector<Type *> ParamTys; 3904 std::transform(FTy->param_begin(), FTy->param_end(), 3905 std::back_inserter(ParamTys), 3906 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3907 3908 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3909 dyn_cast<IntrinsicInst>(CI)); 3910 return TTI.getIntrinsicInstrCost(CostAttrs, 3911 TargetTransformInfo::TCK_RecipThroughput); 3912 } 3913 3914 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3915 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3916 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3917 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3918 } 3919 3920 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3921 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3922 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3923 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3924 } 3925 3926 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3927 // For every instruction `I` in MinBWs, truncate the operands, create a 3928 // truncated version of `I` and reextend its result. InstCombine runs 3929 // later and will remove any ext/trunc pairs. 
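  // (Editorial illustration: if MinBWs records that an i32 add needs only 8
  // bits, a <4 x i32> add is rewritten below as truncs to <4 x i8>, a
  // <4 x i8> add, and a zext of the result back to <4 x i32>.)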
3930 SmallPtrSet<Value *, 4> Erased; 3931 for (const auto &KV : Cost->getMinimalBitwidths()) { 3932 // If the value wasn't vectorized, we must maintain the original scalar 3933 // type. The absence of the value from State indicates that it 3934 // wasn't vectorized. 3935 VPValue *Def = State.Plan->getVPValue(KV.first); 3936 if (!State.hasAnyVectorValue(Def)) 3937 continue; 3938 for (unsigned Part = 0; Part < UF; ++Part) { 3939 Value *I = State.get(Def, Part); 3940 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3941 continue; 3942 Type *OriginalTy = I->getType(); 3943 Type *ScalarTruncatedTy = 3944 IntegerType::get(OriginalTy->getContext(), KV.second); 3945 auto *TruncatedTy = FixedVectorType::get( 3946 ScalarTruncatedTy, 3947 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3948 if (TruncatedTy == OriginalTy) 3949 continue; 3950 3951 IRBuilder<> B(cast<Instruction>(I)); 3952 auto ShrinkOperand = [&](Value *V) -> Value * { 3953 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3954 if (ZI->getSrcTy() == TruncatedTy) 3955 return ZI->getOperand(0); 3956 return B.CreateZExtOrTrunc(V, TruncatedTy); 3957 }; 3958 3959 // The actual instruction modification depends on the instruction type, 3960 // unfortunately. 3961 Value *NewI = nullptr; 3962 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3963 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3964 ShrinkOperand(BO->getOperand(1))); 3965 3966 // Any wrapping introduced by shrinking this operation shouldn't be 3967 // considered undefined behavior. So, we can't unconditionally copy 3968 // arithmetic wrapping flags to NewI. 3969 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3970 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3971 NewI = 3972 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3973 ShrinkOperand(CI->getOperand(1))); 3974 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3975 NewI = B.CreateSelect(SI->getCondition(), 3976 ShrinkOperand(SI->getTrueValue()), 3977 ShrinkOperand(SI->getFalseValue())); 3978 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3979 switch (CI->getOpcode()) { 3980 default: 3981 llvm_unreachable("Unhandled cast!"); 3982 case Instruction::Trunc: 3983 NewI = ShrinkOperand(CI->getOperand(0)); 3984 break; 3985 case Instruction::SExt: 3986 NewI = B.CreateSExtOrTrunc( 3987 CI->getOperand(0), 3988 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3989 break; 3990 case Instruction::ZExt: 3991 NewI = B.CreateZExtOrTrunc( 3992 CI->getOperand(0), 3993 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3994 break; 3995 } 3996 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3997 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3998 ->getNumElements(); 3999 auto *O0 = B.CreateZExtOrTrunc( 4000 SI->getOperand(0), 4001 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 4002 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 4003 ->getNumElements(); 4004 auto *O1 = B.CreateZExtOrTrunc( 4005 SI->getOperand(1), 4006 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 4007 4008 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4009 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4010 // Don't do anything with the operands, just extend the result. 
4011 continue; 4012 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4013 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 4014 ->getNumElements(); 4015 auto *O0 = B.CreateZExtOrTrunc( 4016 IE->getOperand(0), 4017 FixedVectorType::get(ScalarTruncatedTy, Elements)); 4018 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4019 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4020 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4021 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 4022 ->getNumElements(); 4023 auto *O0 = B.CreateZExtOrTrunc( 4024 EE->getOperand(0), 4025 FixedVectorType::get(ScalarTruncatedTy, Elements)); 4026 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4027 } else { 4028 // If we don't know what to do, be conservative and don't do anything. 4029 continue; 4030 } 4031 4032 // Lastly, extend the result. 4033 NewI->takeName(cast<Instruction>(I)); 4034 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4035 I->replaceAllUsesWith(Res); 4036 cast<Instruction>(I)->eraseFromParent(); 4037 Erased.insert(I); 4038 State.reset(Def, Res, Part); 4039 } 4040 } 4041 4042 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4043 for (const auto &KV : Cost->getMinimalBitwidths()) { 4044 // If the value wasn't vectorized, we must maintain the original scalar 4045 // type. The absence of the value from State indicates that it 4046 // wasn't vectorized. 4047 VPValue *Def = State.Plan->getVPValue(KV.first); 4048 if (!State.hasAnyVectorValue(Def)) 4049 continue; 4050 for (unsigned Part = 0; Part < UF; ++Part) { 4051 Value *I = State.get(Def, Part); 4052 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4053 if (Inst && Inst->use_empty()) { 4054 Value *NewI = Inst->getOperand(0); 4055 Inst->eraseFromParent(); 4056 State.reset(Def, NewI, Part); 4057 } 4058 } 4059 } 4060 } 4061 4062 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4063 // Insert truncates and extends for any truncated instructions as hints to 4064 // InstCombine. 4065 if (VF.isVector()) 4066 truncateToMinimalBitwidths(State); 4067 4068 // Fix widened non-induction PHIs by setting up the PHI operands. 4069 if (OrigPHIsToFix.size()) { 4070 assert(EnableVPlanNativePath && 4071 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4072 fixNonInductionPHIs(State); 4073 } 4074 4075 // At this point every instruction in the original loop is widened to a 4076 // vector form. Now we need to fix the recurrences in the loop. These PHI 4077 // nodes are currently empty because we did not want to introduce cycles. 4078 // This is the second stage of vectorizing recurrences. 4079 fixCrossIterationPHIs(State); 4080 4081 // Forget the original basic block. 4082 PSE.getSE()->forgetLoop(OrigLoop); 4083 4084 // Fix-up external users of the induction variables. 4085 for (auto &Entry : Legal->getInductionVars()) 4086 fixupIVUsers(Entry.first, Entry.second, 4087 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4088 IVEndValues[Entry.first], LoopMiddleBlock); 4089 4090 fixLCSSAPHIs(State); 4091 for (Instruction *PI : PredicatedInstructions) 4092 sinkScalarOperands(&*PI); 4093 4094 // Remove redundant induction instructions. 4095 cse(LoopVectorBody); 4096 4097 // Set/update profile weights for the vector and remainder loops as original 4098 // loop iterations are now distributed among them. Note that original loop 4099 // represented by LoopScalarBody becomes remainder loop after vectorization. 
4100 // 4101 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may 4102 // end up with a slightly inaccurate result but that should be OK since the 4103 // profile is not inherently precise anyway. Note also that any possible bypass of 4104 // vector code caused by legality checks is ignored, optimistically assigning all 4105 // the weight to the vector loop. 4106 // 4107 // For scalable vectorization we can't know at compile time how many iterations 4108 // of the loop are handled in one vector iteration, so instead assume a pessimistic 4109 // vscale of '1'. 4110 setProfileInfoAfterUnrolling( 4111 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 4112 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 4113 } 4114 4115 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 4116 // In order to support recurrences we need to be able to vectorize Phi nodes. 4117 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4118 // stage #2: We now need to fix the recurrences by adding incoming edges to 4119 // the currently empty PHI nodes. At this point every instruction in the 4120 // original loop is widened to a vector form so we can use them to construct 4121 // the incoming edges. 4122 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 4123 for (VPRecipeBase &R : Header->phis()) { 4124 auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R); 4125 if (!PhiR) 4126 continue; 4127 auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 4128 if (PhiR->getRecurrenceDescriptor()) { 4129 fixReduction(PhiR, State); 4130 } else if (Legal->isFirstOrderRecurrence(OrigPhi)) 4131 fixFirstOrderRecurrence(OrigPhi, State); 4132 } 4133 } 4134 4135 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi, 4136 VPTransformState &State) { 4137 // This is the second phase of vectorizing first-order recurrences. An 4138 // overview of the transformation is described below. Suppose we have the 4139 // following loop. 4140 // 4141 // for (int i = 0; i < n; ++i) 4142 // b[i] = a[i] - a[i - 1]; 4143 // 4144 // There is a first-order recurrence on "a". For this loop, the shorthand 4145 // scalar IR looks like: 4146 // 4147 // scalar.ph: 4148 // s_init = a[-1] 4149 // br scalar.body 4150 // 4151 // scalar.body: 4152 // i = phi [0, scalar.ph], [i+1, scalar.body] 4153 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4154 // s2 = a[i] 4155 // b[i] = s2 - s1 4156 // br cond, scalar.body, ... 4157 // 4158 // In this example, s1 is a recurrence because its value depends on the 4159 // previous iteration. In the first phase of vectorization, we created a 4160 // temporary value for s1. We now complete the vectorization and produce the 4161 // shorthand vector IR shown below (for VF = 4, UF = 1). 4162 // 4163 // vector.ph: 4164 // v_init = vector(..., ..., ..., a[-1]) 4165 // br vector.body 4166 // 4167 // vector.body 4168 // i = phi [0, vector.ph], [i+4, vector.body] 4169 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4170 // v2 = a[i, i+1, i+2, i+3]; 4171 // v3 = vector(v1(3), v2(0, 1, 2)) 4172 // b[i, i+1, i+2, i+3] = v2 - v3 4173 // br cond, vector.body, middle.block 4174 // 4175 // middle.block: 4176 // x = v2(3) 4177 // br scalar.ph 4178 // 4179 // scalar.ph: 4180 // s_init = phi [x, middle.block], [a[-1], otherwise] 4181 // br scalar.body 4182 // 4183 // After the vector loop completes execution, we extract the next value of 4184 // the recurrence (x) to use as the initial value in the scalar loop.
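// For fixed-width VF, the splice producing v3 below is expected to lower to a
// shufflevector; for example (illustrative shorthand, assuming VF = 4):
//   v3 = shufflevector v1, v2, <3, 4, 5, 6>
// i.e. the last lane of v1 followed by the first three lanes of v2. For
// scalable vectors the same operation is emitted as a vector splice intrinsic
// with an offset of -1.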
4185 4186 // Get the original loop preheader and single loop latch. 4187 auto *Preheader = OrigLoop->getLoopPreheader(); 4188 auto *Latch = OrigLoop->getLoopLatch(); 4189 4190 // Get the initial and previous values of the scalar recurrence. 4191 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4192 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4193 4194 auto *IdxTy = Builder.getInt32Ty(); 4195 auto *One = ConstantInt::get(IdxTy, 1); 4196 4197 // Create a vector from the initial value. 4198 auto *VectorInit = ScalarInit; 4199 if (VF.isVector()) { 4200 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4201 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4202 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4203 VectorInit = Builder.CreateInsertElement( 4204 PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), 4205 VectorInit, LastIdx, "vector.recur.init"); 4206 } 4207 4208 VPValue *PhiDef = State.Plan->getVPValue(Phi); 4209 VPValue *PreviousDef = State.Plan->getVPValue(Previous); 4210 // We constructed a temporary phi node in the first phase of vectorization. 4211 // This phi node will eventually be deleted. 4212 Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0))); 4213 4214 // Create a phi node for the new recurrence. The current value will either be 4215 // the initial value inserted into a vector or loop-varying vector value. 4216 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4217 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4218 4219 // Get the vectorized previous value of the last part UF - 1. It appears last 4220 // among all unrolled iterations, due to the order of their construction. 4221 Value *PreviousLastPart = State.get(PreviousDef, UF - 1); 4222 4223 // Find and set the insertion point after the previous value if it is an 4224 // instruction. 4225 BasicBlock::iterator InsertPt; 4226 // Note that the previous value may have been constant-folded so it is not 4227 // guaranteed to be an instruction in the vector loop. 4228 // FIXME: Loop invariant values do not form recurrences. We should deal with 4229 // them earlier. 4230 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 4231 InsertPt = LoopVectorBody->getFirstInsertionPt(); 4232 else { 4233 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 4234 if (isa<PHINode>(PreviousLastPart)) 4235 // If the previous value is a phi node, we should insert after all the phi 4236 // nodes in the block containing the PHI to avoid breaking basic block 4237 // verification. Note that the basic block may be different to 4238 // LoopVectorBody, in case we predicate the loop. 4239 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 4240 else 4241 InsertPt = ++PreviousInst->getIterator(); 4242 } 4243 Builder.SetInsertPoint(&*InsertPt); 4244 4245 // The vector from which to take the initial value for the current iteration 4246 // (actual or unrolled). Initially, this is the vector phi node. 4247 Value *Incoming = VecPhi; 4248 4249 // Shuffle the current and previous vector and update the vector parts. 4250 for (unsigned Part = 0; Part < UF; ++Part) { 4251 Value *PreviousPart = State.get(PreviousDef, Part); 4252 Value *PhiPart = State.get(PhiDef, Part); 4253 auto *Shuffle = VF.isVector() 4254 ? 
Builder.CreateVectorSplice(Incoming, PreviousPart, -1) 4255 : Incoming; 4256 PhiPart->replaceAllUsesWith(Shuffle); 4257 cast<Instruction>(PhiPart)->eraseFromParent(); 4258 State.reset(PhiDef, Shuffle, Part); 4259 Incoming = PreviousPart; 4260 } 4261 4262 // Fix the latch value of the new recurrence in the vector loop. 4263 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4264 4265 // Extract the last vector element in the middle block. This will be the 4266 // initial value for the recurrence when jumping to the scalar loop. 4267 auto *ExtractForScalar = Incoming; 4268 if (VF.isVector()) { 4269 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4270 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4271 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4272 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4273 "vector.recur.extract"); 4274 } 4275 // Extract the second last element in the middle block if the 4276 // Phi is used outside the loop. We need to extract the phi itself 4277 // and not the last element (the phi update in the current iteration). This 4278 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4279 // when the scalar loop is not run at all. 4280 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4281 if (VF.isVector()) { 4282 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4283 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4284 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4285 Incoming, Idx, "vector.recur.extract.for.phi"); 4286 } else if (UF > 1) 4287 // When loop is unrolled without vectorizing, initialize 4288 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4289 // of `Incoming`. This is analogous to the vectorized case above: extracting 4290 // the second last element when VF > 1. 4291 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4292 4293 // Fix the initial value of the original recurrence in the scalar loop. 4294 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4295 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4296 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4297 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4298 Start->addIncoming(Incoming, BB); 4299 } 4300 4301 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4302 Phi->setName("scalar.recur"); 4303 4304 // Finally, fix users of the recurrence outside the loop. The users will need 4305 // either the last value of the scalar recurrence or the last value of the 4306 // vector recurrence we extracted in the middle block. Since the loop is in 4307 // LCSSA form, we just need to find all the phi nodes for the original scalar 4308 // recurrence in the exit block, and then add an edge for the middle block. 4309 // Note that LCSSA does not imply single entry when the original scalar loop 4310 // had multiple exiting edges (as we always run the last iteration in the 4311 // scalar epilogue); in that case, the exiting path through middle will be 4312 // dynamically dead and the value picked for the phi doesn't matter. 
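// For illustration (shorthand, names not generated verbatim): an exit-block
// phi such as
//   s1.lcssa = phi [s1, scalar.body]
// gains an incoming edge from the middle block,
//   s1.lcssa = phi [s1, scalar.body], [vector.recur.extract.for.phi, middle.block]
// which is what the loop below arranges.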
4313 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4314 if (any_of(LCSSAPhi.incoming_values(), 4315 [Phi](Value *V) { return V == Phi; })) 4316 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4317 } 4318 4319 static bool useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4320 return EnableStrictReductions && RdxDesc.isOrdered(); 4321 } 4322 4323 void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR, 4324 VPTransformState &State) { 4325 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 4326 // Get its reduction variable descriptor. 4327 assert(Legal->isReductionVariable(OrigPhi) && 4328 "Unable to find the reduction variable"); 4329 RecurrenceDescriptor RdxDesc = *PhiR->getRecurrenceDescriptor(); 4330 4331 RecurKind RK = RdxDesc.getRecurrenceKind(); 4332 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4333 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4334 setDebugLocFromInst(Builder, ReductionStartValue); 4335 bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi); 4336 4337 VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst); 4338 // This is the vector-clone of the value that leaves the loop. 4339 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 4340 4341 // Wrap flags are in general invalid after vectorization, clear them. 4342 clearReductionWrapFlags(RdxDesc, State); 4343 4344 // Fix the vector-loop phi. 4345 4346 // Reductions do not have to start at zero. They can start with 4347 // any loop-invariant value. 4348 BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4349 4350 bool IsOrdered = State.VF.isVector() && IsInLoopReductionPhi && 4351 useOrderedReductions(RdxDesc); 4352 4353 for (unsigned Part = 0; Part < UF; ++Part) { 4354 if (IsOrdered && Part > 0) 4355 break; 4356 Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part); 4357 Value *Val = State.get(PhiR->getBackedgeValue(), Part); 4358 if (IsOrdered) 4359 Val = State.get(PhiR->getBackedgeValue(), UF - 1); 4360 4361 cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch); 4362 } 4363 4364 // Before each round, move the insertion point right between 4365 // the PHIs and the values we are going to write. 4366 // This allows us to write both PHINodes and the extractelement 4367 // instructions. 4368 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4369 4370 setDebugLocFromInst(Builder, LoopExitInst); 4371 4372 Type *PhiTy = OrigPhi->getType(); 4373 // If tail is folded by masking, the vector value to leave the loop should be 4374 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4375 // instead of the former. For an inloop reduction the reduction will already 4376 // be predicated, and does not need to be handled here.
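// For illustration (shorthand, names are illustrative): with a folded tail a
// vectorized add reduction looks like
//   rdx.next = add vec.phi, val
//   rdx.sel  = select mask, rdx.next, vec.phi
// and the value leaving the loop should be rdx.sel rather than rdx.next; the
// code below finds that select and uses it instead.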
4377 if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { 4378 for (unsigned Part = 0; Part < UF; ++Part) { 4379 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4380 Value *Sel = nullptr; 4381 for (User *U : VecLoopExitInst->users()) { 4382 if (isa<SelectInst>(U)) { 4383 assert(!Sel && "Reduction exit feeding two selects"); 4384 Sel = U; 4385 } else 4386 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4387 } 4388 assert(Sel && "Reduction exit feeds no select"); 4389 State.reset(LoopExitInstDef, Sel, Part); 4390 4391 // If the target can create a predicated operator for the reduction at no 4392 // extra cost in the loop (for example a predicated vadd), it can be 4393 // cheaper for the select to remain in the loop than be sunk out of it, 4394 // and so use the select value for the phi instead of the old 4395 // LoopExitValue. 4396 if (PreferPredicatedReductionSelect || 4397 TTI->preferPredicatedReductionSelect( 4398 RdxDesc.getOpcode(), PhiTy, 4399 TargetTransformInfo::ReductionFlags())) { 4400 auto *VecRdxPhi = 4401 cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part)); 4402 VecRdxPhi->setIncomingValueForBlock( 4403 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4404 } 4405 } 4406 } 4407 4408 // If the vector reduction can be performed in a smaller type, we truncate 4409 // then extend the loop exit value to enable InstCombine to evaluate the 4410 // entire expression in the smaller type. 4411 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4412 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4413 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4414 Builder.SetInsertPoint( 4415 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4416 VectorParts RdxParts(UF); 4417 for (unsigned Part = 0; Part < UF; ++Part) { 4418 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4419 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4420 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4421 : Builder.CreateZExt(Trunc, VecTy); 4422 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4423 UI != RdxParts[Part]->user_end();) 4424 if (*UI != Trunc) { 4425 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4426 RdxParts[Part] = Extnd; 4427 } else { 4428 ++UI; 4429 } 4430 } 4431 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4432 for (unsigned Part = 0; Part < UF; ++Part) { 4433 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4434 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4435 } 4436 } 4437 4438 // Reduce all of the unrolled parts into a single vector. 4439 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4440 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4441 4442 // The middle block terminator has already been assigned a DebugLoc here (the 4443 // OrigLoop's single latch terminator). We want the whole middle block to 4444 // appear to execute on this line because: (a) it is all compiler generated, 4445 // (b) these instructions are always executed after evaluating the latch 4446 // conditional branch, and (c) other passes may add new predecessors which 4447 // terminate on this line. This is the easiest way to ensure we don't 4448 // accidentally cause an extra step back into the loop while debugging. 
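// For illustration (shorthand, assuming UF = 2 and an add reduction): the two
// unrolled part values are first combined as
//   bin.rdx = add rdx.part1, rdx.part0
// and the final scalar value is then produced by the target reduction created
// further below.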
4449 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4450 if (IsOrdered) 4451 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4452 else { 4453 // Floating-point operations should have some FMF to enable the reduction. 4454 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4455 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4456 for (unsigned Part = 1; Part < UF; ++Part) { 4457 Value *RdxPart = State.get(LoopExitInstDef, Part); 4458 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4459 ReducedPartRdx = Builder.CreateBinOp( 4460 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4461 } else { 4462 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4463 } 4464 } 4465 } 4466 4467 // Create the reduction after the loop. Note that inloop reductions create the 4468 // target reduction in the loop using a Reduction recipe. 4469 if (VF.isVector() && !IsInLoopReductionPhi) { 4470 ReducedPartRdx = 4471 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4472 // If the reduction can be performed in a smaller type, we need to extend 4473 // the reduction to the wider type before we branch to the original loop. 4474 if (PhiTy != RdxDesc.getRecurrenceType()) 4475 ReducedPartRdx = RdxDesc.isSigned() 4476 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4477 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4478 } 4479 4480 // Create a phi node that merges control-flow from the backedge-taken check 4481 // block and the middle block. 4482 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4483 LoopScalarPreHeader->getTerminator()); 4484 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4485 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4486 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4487 4488 // Now, we need to fix the users of the reduction variable 4489 // inside and outside of the scalar remainder loop. 4490 4491 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4492 // in the exit blocks. See comment on analogous loop in 4493 // fixFirstOrderRecurrence for a more complete explanation of the logic. 4494 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4495 if (any_of(LCSSAPhi.incoming_values(), 4496 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4497 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4498 4499 // Fix the scalar loop reduction variable with the incoming reduction sum 4500 // from the vector body and from the backedge value. 4501 int IncomingEdgeBlockIdx = 4502 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4503 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4504 // Pick the other block. 4505 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ?
0 : 1); 4506 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4507 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4508 } 4509 4510 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc, 4511 VPTransformState &State) { 4512 RecurKind RK = RdxDesc.getRecurrenceKind(); 4513 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4514 return; 4515 4516 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4517 assert(LoopExitInstr && "null loop exit instruction"); 4518 SmallVector<Instruction *, 8> Worklist; 4519 SmallPtrSet<Instruction *, 8> Visited; 4520 Worklist.push_back(LoopExitInstr); 4521 Visited.insert(LoopExitInstr); 4522 4523 while (!Worklist.empty()) { 4524 Instruction *Cur = Worklist.pop_back_val(); 4525 if (isa<OverflowingBinaryOperator>(Cur)) 4526 for (unsigned Part = 0; Part < UF; ++Part) { 4527 Value *V = State.get(State.Plan->getVPValue(Cur), Part); 4528 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4529 } 4530 4531 for (User *U : Cur->users()) { 4532 Instruction *UI = cast<Instruction>(U); 4533 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4534 Visited.insert(UI).second) 4535 Worklist.push_back(UI); 4536 } 4537 } 4538 } 4539 4540 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4541 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4542 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4543 // Some phis were already hand updated by the reduction and recurrence 4544 // code above, leave them alone. 4545 continue; 4546 4547 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4548 // Non-instruction incoming values will have only one value. 4549 4550 VPLane Lane = VPLane::getFirstLane(); 4551 if (isa<Instruction>(IncomingValue) && 4552 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4553 VF)) 4554 Lane = VPLane::getLastLaneForVF(VF); 4555 4556 // Can be a loop invariant incoming value or the last scalar value to be 4557 // extracted from the vectorized loop. 4558 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4559 Value *lastIncomingValue = 4560 OrigLoop->isLoopInvariant(IncomingValue) 4561 ? IncomingValue 4562 : State.get(State.Plan->getVPValue(IncomingValue), 4563 VPIteration(UF - 1, Lane)); 4564 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4565 } 4566 } 4567 4568 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4569 // The basic block and loop containing the predicated instruction. 4570 auto *PredBB = PredInst->getParent(); 4571 auto *VectorLoop = LI->getLoopFor(PredBB); 4572 4573 // Initialize a worklist with the operands of the predicated instruction. 4574 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4575 4576 // Holds instructions that we need to analyze again. An instruction may be 4577 // reanalyzed if we don't yet know if we can sink it or not. 4578 SmallVector<Instruction *, 8> InstsToReanalyze; 4579 4580 // Returns true if a given use occurs in the predicated block. Phi nodes use 4581 // their operands in their corresponding predecessor blocks. 4582 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4583 auto *I = cast<Instruction>(U.getUser()); 4584 BasicBlock *BB = I->getParent(); 4585 if (auto *Phi = dyn_cast<PHINode>(I)) 4586 BB = Phi->getIncomingBlock( 4587 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4588 return BB == PredBB; 4589 }; 4590 4591 // Iteratively sink the scalarized operands of the predicated instruction 4592 // into the block we created for it. 
When an instruction is sunk, its 4593 // operands are then added to the worklist. The algorithm ends when a pass 4594 // through the worklist doesn't sink a single instruction. 4595 bool Changed; 4596 do { 4597 // Add the instructions that need to be reanalyzed to the worklist, and 4598 // reset the changed indicator. 4599 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4600 InstsToReanalyze.clear(); 4601 Changed = false; 4602 4603 while (!Worklist.empty()) { 4604 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4605 4606 // We can't sink an instruction if it is a phi node, is not in the loop, 4607 // or may have side effects. 4608 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4609 I->mayHaveSideEffects()) 4610 continue; 4611 4612 // If the instruction is already in PredBB, check if we can sink its 4613 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4614 // sinking the scalar instruction I, hence it appears in PredBB; but it 4615 // may have failed to sink I's operands (recursively), which we try 4616 // (again) here. 4617 if (I->getParent() == PredBB) { 4618 Worklist.insert(I->op_begin(), I->op_end()); 4619 continue; 4620 } 4621 4622 // It's legal to sink the instruction if all its uses occur in the 4623 // predicated block. Otherwise, there's nothing to do yet, and we may 4624 // need to reanalyze the instruction. 4625 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4626 InstsToReanalyze.push_back(I); 4627 continue; 4628 } 4629 4630 // Move the instruction to the beginning of the predicated block, and add 4631 // its operands to the worklist. 4632 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4633 Worklist.insert(I->op_begin(), I->op_end()); 4634 4635 // The sinking may have enabled other instructions to be sunk, so we will 4636 // need to iterate. 4637 Changed = true; 4638 } 4639 } while (Changed); 4640 } 4641 4642 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4643 for (PHINode *OrigPhi : OrigPHIsToFix) { 4644 VPWidenPHIRecipe *VPPhi = 4645 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4646 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4647 // Make sure the builder has a valid insert point. 4648 Builder.SetInsertPoint(NewPhi); 4649 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4650 VPValue *Inc = VPPhi->getIncomingValue(i); 4651 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4652 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4653 } 4654 } 4655 } 4656 4657 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4658 VPUser &Operands, unsigned UF, 4659 ElementCount VF, bool IsPtrLoopInvariant, 4660 SmallBitVector &IsIndexLoopInvariant, 4661 VPTransformState &State) { 4662 // Construct a vector GEP by widening the operands of the scalar GEP as 4663 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4664 // results in a vector of pointers when at least one operand of the GEP 4665 // is vector-typed. Thus, to keep the representation compact, we only use 4666 // vector-typed operands for loop-varying values. 4667 4668 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4669 // If we are vectorizing, but the GEP has only loop-invariant operands, 4670 // the GEP we build (by only using vector-typed operands for 4671 // loop-varying values) would be a scalar pointer.
Thus, to ensure we 4672 // produce a vector of pointers, we need to either arbitrarily pick an 4673 // operand to broadcast, or broadcast a clone of the original GEP. 4674 // Here, we broadcast a clone of the original. 4675 // 4676 // TODO: If at some point we decide to scalarize instructions having 4677 // loop-invariant operands, this special case will no longer be 4678 // required. We would add the scalarization decision to 4679 // collectLoopScalars() and teach getVectorValue() to broadcast 4680 // the lane-zero scalar value. 4681 auto *Clone = Builder.Insert(GEP->clone()); 4682 for (unsigned Part = 0; Part < UF; ++Part) { 4683 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4684 State.set(VPDef, EntryPart, Part); 4685 addMetadata(EntryPart, GEP); 4686 } 4687 } else { 4688 // If the GEP has at least one loop-varying operand, we are sure to 4689 // produce a vector of pointers. But if we are only unrolling, we want 4690 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4691 // produce with the code below will be scalar (if VF == 1) or vector 4692 // (otherwise). Note that for the unroll-only case, we still maintain 4693 // values in the vector mapping with initVector, as we do for other 4694 // instructions. 4695 for (unsigned Part = 0; Part < UF; ++Part) { 4696 // The pointer operand of the new GEP. If it's loop-invariant, we 4697 // won't broadcast it. 4698 auto *Ptr = IsPtrLoopInvariant 4699 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4700 : State.get(Operands.getOperand(0), Part); 4701 4702 // Collect all the indices for the new GEP. If any index is 4703 // loop-invariant, we won't broadcast it. 4704 SmallVector<Value *, 4> Indices; 4705 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4706 VPValue *Operand = Operands.getOperand(I); 4707 if (IsIndexLoopInvariant[I - 1]) 4708 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4709 else 4710 Indices.push_back(State.get(Operand, Part)); 4711 } 4712 4713 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4714 // but it should be a vector, otherwise. 4715 auto *NewGEP = 4716 GEP->isInBounds() 4717 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4718 Indices) 4719 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4720 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4721 "NewGEP is not a pointer vector"); 4722 State.set(VPDef, NewGEP, Part); 4723 addMetadata(NewGEP, GEP); 4724 } 4725 } 4726 } 4727 4728 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4729 RecurrenceDescriptor *RdxDesc, 4730 VPWidenPHIRecipe *PhiR, 4731 VPTransformState &State) { 4732 PHINode *P = cast<PHINode>(PN); 4733 if (EnableVPlanNativePath) { 4734 // Currently we enter here in the VPlan-native path for non-induction 4735 // PHIs where all control flow is uniform. We simply widen these PHIs. 4736 // Create a vector phi with no operands - the vector phi operands will be 4737 // set at the end of vector code generation. 4738 Type *VecTy = (State.VF.isScalar()) 4739 ? PN->getType() 4740 : VectorType::get(PN->getType(), State.VF); 4741 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4742 State.set(PhiR, VecPhi, 0); 4743 OrigPHIsToFix.push_back(P); 4744 4745 return; 4746 } 4747 4748 assert(PN->getParent() == OrigLoop->getHeader() && 4749 "Non-header phis should have been handled elsewhere"); 4750 4751 VPValue *StartVPV = PhiR->getStartValue(); 4752 Value *StartV = StartVPV ? 
StartVPV->getLiveInIRValue() : nullptr; 4753 // In order to support recurrences we need to be able to vectorize Phi nodes. 4754 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4755 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4756 // this value when we vectorize all of the instructions that use the PHI. 4757 if (RdxDesc || Legal->isFirstOrderRecurrence(P)) { 4758 Value *Iden = nullptr; 4759 bool ScalarPHI = 4760 (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN)); 4761 Type *VecTy = 4762 ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF); 4763 4764 if (RdxDesc) { 4765 assert(Legal->isReductionVariable(P) && StartV && 4766 "RdxDesc should only be set for reduction variables; in that case " 4767 "a StartV is also required"); 4768 RecurKind RK = RdxDesc->getRecurrenceKind(); 4769 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) { 4770 // MinMax reductions have the start value as their identity. 4771 if (ScalarPHI) { 4772 Iden = StartV; 4773 } else { 4774 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4775 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4776 StartV = Iden = 4777 Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident"); 4778 } 4779 } else { 4780 Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( 4781 RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags()); 4782 Iden = IdenC; 4783 4784 if (!ScalarPHI) { 4785 Iden = ConstantVector::getSplat(State.VF, IdenC); 4786 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4787 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4788 Constant *Zero = Builder.getInt32(0); 4789 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 4790 } 4791 } 4792 } 4793 4794 bool IsOrdered = State.VF.isVector() && 4795 Cost->isInLoopReduction(cast<PHINode>(PN)) && 4796 useOrderedReductions(*RdxDesc); 4797 4798 for (unsigned Part = 0; Part < State.UF; ++Part) { 4799 // This is phase one of vectorizing PHIs. 4800 if (Part > 0 && IsOrdered) 4801 return; 4802 Value *EntryPart = PHINode::Create( 4803 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4804 State.set(PhiR, EntryPart, Part); 4805 if (StartV) { 4806 // Make sure to add the reduction start value only to the 4807 // first unroll part. 4808 Value *StartVal = (Part == 0) ? StartV : Iden; 4809 cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); 4810 } 4811 } 4812 return; 4813 } 4814 4815 assert(!Legal->isReductionVariable(P) && 4816 "reductions should be handled above"); 4817 4818 setDebugLocFromInst(Builder, P); 4819 4820 // This PHINode must be an induction variable. 4821 // Make sure that we know about it. 4822 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4823 4824 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4825 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4826 4827 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4828 // which can be found from the original scalar operations. 4829 switch (II.getKind()) { 4830 case InductionDescriptor::IK_NoInduction: 4831 llvm_unreachable("Unknown induction"); 4832 case InductionDescriptor::IK_IntInduction: 4833 case InductionDescriptor::IK_FpInduction: 4834 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4835 case InductionDescriptor::IK_PtrInduction: { 4836 // Handle the pointer induction variable case.
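// For illustration (shorthand, assuming VF = 4, UF = 1 and a pointer induction
// that is scalar after vectorization): the code below emits one scalar GEP per
// required lane, roughly
//   next.gep   = getelementptr base, index
//   next.gep.1 = getelementptr base, index + 1
//   ...
// where "base" and "index" stand for the induction's start value and the
// transformed scalar index for each lane.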
4837 assert(P->getType()->isPointerTy() && "Unexpected type."); 4838 4839 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4840 // This is the normalized GEP that starts counting at zero. 4841 Value *PtrInd = 4842 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4843 // Determine the number of scalars we need to generate for each unroll 4844 // iteration. If the instruction is uniform, we only need to generate the 4845 // first lane. Otherwise, we generate all VF values. 4846 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4847 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); 4848 4849 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4850 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4851 if (NeedsVectorIndex) { 4852 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4853 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4854 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4855 } 4856 4857 for (unsigned Part = 0; Part < UF; ++Part) { 4858 Value *PartStart = createStepForVF( 4859 Builder, ConstantInt::get(PtrInd->getType(), Part), VF); 4860 4861 if (NeedsVectorIndex) { 4862 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4863 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4864 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4865 Value *SclrGep = 4866 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4867 SclrGep->setName("next.gep"); 4868 State.set(PhiR, SclrGep, Part); 4869 // We've cached the whole vector, which means we can support the 4870 // extraction of any lane. 4871 continue; 4872 } 4873 4874 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4875 Value *Idx = Builder.CreateAdd( 4876 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4877 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4878 Value *SclrGep = 4879 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4880 SclrGep->setName("next.gep"); 4881 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4882 } 4883 } 4884 return; 4885 } 4886 assert(isa<SCEVConstant>(II.getStep()) && 4887 "Induction step not a SCEV constant!"); 4888 Type *PhiType = II.getStep()->getType(); 4889 4890 // Build a pointer phi 4891 Value *ScalarStartValue = II.getStartValue(); 4892 Type *ScStValueType = ScalarStartValue->getType(); 4893 PHINode *NewPointerPhi = 4894 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4895 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4896 4897 // A pointer induction, performed by using a gep 4898 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4899 Instruction *InductionLoc = LoopLatch->getTerminator(); 4900 const SCEV *ScalarStep = II.getStep(); 4901 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4902 Value *ScalarStepValue = 4903 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4904 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4905 Value *NumUnrolledElems = 4906 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4907 Value *InductionGEP = GetElementPtrInst::Create( 4908 ScStValueType->getPointerElementType(), NewPointerPhi, 4909 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4910 InductionLoc); 4911 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4912 4913 // Create UF many actual address geps that use the pointer 4914 // phi as base and a vectorized version of the step value 4915 // (<step*0, ..., step*N>) as offset. 
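// For illustration (shorthand, assuming VF = 4, UF = 1 and a step of one
// element): the GEP built below uses the pointer phi as a scalar base with a
// vector offset, roughly
//   gep = getelementptr pointer.phi, <0, 1, 2, 3>
// yielding one pointer per lane.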
4916 for (unsigned Part = 0; Part < State.UF; ++Part) { 4917 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4918 Value *StartOffsetScalar = 4919 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4920 Value *StartOffset = 4921 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4922 // Create a vector of consecutive numbers from zero to VF. 4923 StartOffset = 4924 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4925 4926 Value *GEP = Builder.CreateGEP( 4927 ScStValueType->getPointerElementType(), NewPointerPhi, 4928 Builder.CreateMul( 4929 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4930 "vector.gep")); 4931 State.set(PhiR, GEP, Part); 4932 } 4933 } 4934 } 4935 } 4936 4937 /// A helper function for checking whether an integer division-related 4938 /// instruction may divide by zero (in which case it must be predicated if 4939 /// executed conditionally in the scalar code). 4940 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4941 /// Non-zero divisors that are non compile-time constants will not be 4942 /// converted into multiplication, so we will still end up scalarizing 4943 /// the division, but can do so w/o predication. 4944 static bool mayDivideByZero(Instruction &I) { 4945 assert((I.getOpcode() == Instruction::UDiv || 4946 I.getOpcode() == Instruction::SDiv || 4947 I.getOpcode() == Instruction::URem || 4948 I.getOpcode() == Instruction::SRem) && 4949 "Unexpected instruction"); 4950 Value *Divisor = I.getOperand(1); 4951 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4952 return !CInt || CInt->isZero(); 4953 } 4954 4955 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4956 VPUser &User, 4957 VPTransformState &State) { 4958 switch (I.getOpcode()) { 4959 case Instruction::Call: 4960 case Instruction::Br: 4961 case Instruction::PHI: 4962 case Instruction::GetElementPtr: 4963 case Instruction::Select: 4964 llvm_unreachable("This instruction is handled by a different recipe."); 4965 case Instruction::UDiv: 4966 case Instruction::SDiv: 4967 case Instruction::SRem: 4968 case Instruction::URem: 4969 case Instruction::Add: 4970 case Instruction::FAdd: 4971 case Instruction::Sub: 4972 case Instruction::FSub: 4973 case Instruction::FNeg: 4974 case Instruction::Mul: 4975 case Instruction::FMul: 4976 case Instruction::FDiv: 4977 case Instruction::FRem: 4978 case Instruction::Shl: 4979 case Instruction::LShr: 4980 case Instruction::AShr: 4981 case Instruction::And: 4982 case Instruction::Or: 4983 case Instruction::Xor: { 4984 // Just widen unops and binops. 4985 setDebugLocFromInst(Builder, &I); 4986 4987 for (unsigned Part = 0; Part < UF; ++Part) { 4988 SmallVector<Value *, 2> Ops; 4989 for (VPValue *VPOp : User.operands()) 4990 Ops.push_back(State.get(VPOp, Part)); 4991 4992 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4993 4994 if (auto *VecOp = dyn_cast<Instruction>(V)) 4995 VecOp->copyIRFlags(&I); 4996 4997 // Use this vector value for all users of the original instruction. 4998 State.set(Def, V, Part); 4999 addMetadata(V, &I); 5000 } 5001 5002 break; 5003 } 5004 case Instruction::ICmp: 5005 case Instruction::FCmp: { 5006 // Widen compares. Generate vector compares. 
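// For illustration (shorthand, assuming VF = 4): a scalar compare such as
//   c = icmp slt a, b
// is widened into a single compare of the widened operands,
//   c.vec = icmp slt a.vec, b.vec
// with fast-math flags propagated for floating-point compares.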
5007 bool FCmp = (I.getOpcode() == Instruction::FCmp); 5008 auto *Cmp = cast<CmpInst>(&I); 5009 setDebugLocFromInst(Builder, Cmp); 5010 for (unsigned Part = 0; Part < UF; ++Part) { 5011 Value *A = State.get(User.getOperand(0), Part); 5012 Value *B = State.get(User.getOperand(1), Part); 5013 Value *C = nullptr; 5014 if (FCmp) { 5015 // Propagate fast math flags. 5016 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 5017 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 5018 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 5019 } else { 5020 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 5021 } 5022 State.set(Def, C, Part); 5023 addMetadata(C, &I); 5024 } 5025 5026 break; 5027 } 5028 5029 case Instruction::ZExt: 5030 case Instruction::SExt: 5031 case Instruction::FPToUI: 5032 case Instruction::FPToSI: 5033 case Instruction::FPExt: 5034 case Instruction::PtrToInt: 5035 case Instruction::IntToPtr: 5036 case Instruction::SIToFP: 5037 case Instruction::UIToFP: 5038 case Instruction::Trunc: 5039 case Instruction::FPTrunc: 5040 case Instruction::BitCast: { 5041 auto *CI = cast<CastInst>(&I); 5042 setDebugLocFromInst(Builder, CI); 5043 5044 /// Vectorize casts. 5045 Type *DestTy = 5046 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 5047 5048 for (unsigned Part = 0; Part < UF; ++Part) { 5049 Value *A = State.get(User.getOperand(0), Part); 5050 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 5051 State.set(Def, Cast, Part); 5052 addMetadata(Cast, &I); 5053 } 5054 break; 5055 } 5056 default: 5057 // This instruction is not vectorized by simple widening. 5058 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 5059 llvm_unreachable("Unhandled instruction!"); 5060 } // end of switch. 5061 } 5062 5063 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 5064 VPUser &ArgOperands, 5065 VPTransformState &State) { 5066 assert(!isa<DbgInfoIntrinsic>(I) && 5067 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 5068 setDebugLocFromInst(Builder, &I); 5069 5070 Module *M = I.getParent()->getParent()->getParent(); 5071 auto *CI = cast<CallInst>(&I); 5072 5073 SmallVector<Type *, 4> Tys; 5074 for (Value *ArgOperand : CI->arg_operands()) 5075 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 5076 5077 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5078 5079 // The flag shows whether we use Intrinsic or a usual Call for vectorized 5080 // version of the instruction. 5081 // Is it beneficial to perform intrinsic call compared to lib call? 5082 bool NeedToScalarize = false; 5083 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 5084 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 5085 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 5086 assert((UseVectorIntrinsic || !NeedToScalarize) && 5087 "Instruction should be scalarized elsewhere."); 5088 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 5089 "Either the intrinsic cost or vector call cost must be valid"); 5090 5091 for (unsigned Part = 0; Part < UF; ++Part) { 5092 SmallVector<Value *, 4> Args; 5093 for (auto &I : enumerate(ArgOperands.operands())) { 5094 // Some intrinsics have a scalar argument - don't replace it with a 5095 // vector. 
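// For example, the exponent operand of llvm.powi must stay scalar while the
// base is widened (illustrative; the exact operand indices that must remain
// scalar are whatever hasVectorInstrinsicScalarOpd reports for the intrinsic).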
5096 Value *Arg; 5097 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5098 Arg = State.get(I.value(), Part); 5099 else 5100 Arg = State.get(I.value(), VPIteration(0, 0)); 5101 Args.push_back(Arg); 5102 } 5103 5104 Function *VectorF; 5105 if (UseVectorIntrinsic) { 5106 // Use vector version of the intrinsic. 5107 Type *TysForDecl[] = {CI->getType()}; 5108 if (VF.isVector()) 5109 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5110 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5111 assert(VectorF && "Can't retrieve vector intrinsic."); 5112 } else { 5113 // Use vector version of the function call. 5114 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5115 #ifndef NDEBUG 5116 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5117 "Can't create vector function."); 5118 #endif 5119 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5120 } 5121 SmallVector<OperandBundleDef, 1> OpBundles; 5122 CI->getOperandBundlesAsDefs(OpBundles); 5123 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5124 5125 if (isa<FPMathOperator>(V)) 5126 V->copyFastMathFlags(CI); 5127 5128 State.set(Def, V, Part); 5129 addMetadata(V, &I); 5130 } 5131 } 5132 5133 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5134 VPUser &Operands, 5135 bool InvariantCond, 5136 VPTransformState &State) { 5137 setDebugLocFromInst(Builder, &I); 5138 5139 // The condition can be loop invariant but still defined inside the 5140 // loop. This means that we can't just use the original 'cond' value. 5141 // We have to take the 'vectorized' value and pick the first lane. 5142 // Instcombine will make this a no-op. 5143 auto *InvarCond = InvariantCond 5144 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5145 : nullptr; 5146 5147 for (unsigned Part = 0; Part < UF; ++Part) { 5148 Value *Cond = 5149 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5150 Value *Op0 = State.get(Operands.getOperand(1), Part); 5151 Value *Op1 = State.get(Operands.getOperand(2), Part); 5152 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5153 State.set(VPDef, Sel, Part); 5154 addMetadata(Sel, &I); 5155 } 5156 } 5157 5158 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5159 // We should not collect Scalars more than once per VF. Right now, this 5160 // function is called from collectUniformsAndScalars(), which already does 5161 // this check. Collecting Scalars for VF=1 does not make any sense. 5162 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5163 "This function should not be visited twice for the same VF"); 5164 5165 SmallSetVector<Instruction *, 8> Worklist; 5166 5167 // These sets are used to seed the analysis with pointers used by memory 5168 // accesses that will remain scalar. 5169 SmallSetVector<Instruction *, 8> ScalarPtrs; 5170 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5171 auto *Latch = TheLoop->getLoopLatch(); 5172 5173 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5174 // The pointer operands of loads and stores will be scalar as long as the 5175 // memory access is not a gather or scatter operation. The value operand of a 5176 // store will remain scalar if the store is scalarized. 
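// For example (illustrative): in a store of value V through pointer P, the use
// of P is a scalar use unless the store becomes a scatter, while the use of V
// is a scalar use only if the store itself is scalarized.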
5177 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5178 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5179 assert(WideningDecision != CM_Unknown && 5180 "Widening decision should be ready at this moment"); 5181 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5182 if (Ptr == Store->getValueOperand()) 5183 return WideningDecision == CM_Scalarize; 5184 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5185 "Ptr is neither a value nor a pointer operand"); 5186 return WideningDecision != CM_GatherScatter; 5187 }; 5188 5189 // A helper that returns true if the given value is a bitcast or 5190 // getelementptr instruction contained in the loop. 5191 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5192 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5193 isa<GetElementPtrInst>(V)) && 5194 !TheLoop->isLoopInvariant(V); 5195 }; 5196 5197 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5198 if (!isa<PHINode>(Ptr) || 5199 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5200 return false; 5201 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5202 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5203 return false; 5204 return isScalarUse(MemAccess, Ptr); 5205 }; 5206 5207 // A helper that evaluates a memory access's use of a pointer. If the 5208 // pointer is actually the pointer induction of a loop, it is inserted 5209 // into Worklist. If the use will be a scalar use, and the 5210 // pointer is only used by memory accesses, we place the pointer in 5211 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5212 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5213 if (isScalarPtrInduction(MemAccess, Ptr)) { 5214 Worklist.insert(cast<Instruction>(Ptr)); 5215 Instruction *Update = cast<Instruction>( 5216 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5217 Worklist.insert(Update); 5218 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5219 << "\n"); 5220 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update 5221 << "\n"); 5222 return; 5223 } 5224 // We only care about bitcast and getelementptr instructions contained in 5225 // the loop. 5226 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5227 return; 5228 5229 // If the pointer has already been identified as scalar (e.g., if it was 5230 // also identified as uniform), there's nothing to do. 5231 auto *I = cast<Instruction>(Ptr); 5232 if (Worklist.count(I)) 5233 return; 5234 5235 // If the use of the pointer will be a scalar use, and all users of the 5236 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5237 // place the pointer in PossibleNonScalarPtrs. 5238 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5239 return isa<LoadInst>(U) || isa<StoreInst>(U); 5240 })) 5241 ScalarPtrs.insert(I); 5242 else 5243 PossibleNonScalarPtrs.insert(I); 5244 }; 5245 5246 // We seed the scalars analysis with two classes of instructions: (1) 5247 // instructions marked uniform-after-vectorization and (2) bitcast, 5248 // getelementptr and (pointer) phi instructions used by memory accesses 5249 // requiring a scalar use. 5250 // 5251 // (1) Add to the worklist all instructions that have been identified as 5252 // uniform-after-vectorization.
5253 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5254 5255 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5256 // memory accesses requiring a scalar use. The pointer operands of loads and 5257 // stores will be scalar as long as the memory accesses is not a gather or 5258 // scatter operation. The value operand of a store will remain scalar if the 5259 // store is scalarized. 5260 for (auto *BB : TheLoop->blocks()) 5261 for (auto &I : *BB) { 5262 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5263 evaluatePtrUse(Load, Load->getPointerOperand()); 5264 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5265 evaluatePtrUse(Store, Store->getPointerOperand()); 5266 evaluatePtrUse(Store, Store->getValueOperand()); 5267 } 5268 } 5269 for (auto *I : ScalarPtrs) 5270 if (!PossibleNonScalarPtrs.count(I)) { 5271 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5272 Worklist.insert(I); 5273 } 5274 5275 // Insert the forced scalars. 5276 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5277 // induction variable when the PHI user is scalarized. 5278 auto ForcedScalar = ForcedScalars.find(VF); 5279 if (ForcedScalar != ForcedScalars.end()) 5280 for (auto *I : ForcedScalar->second) 5281 Worklist.insert(I); 5282 5283 // Expand the worklist by looking through any bitcasts and getelementptr 5284 // instructions we've already identified as scalar. This is similar to the 5285 // expansion step in collectLoopUniforms(); however, here we're only 5286 // expanding to include additional bitcasts and getelementptr instructions. 5287 unsigned Idx = 0; 5288 while (Idx != Worklist.size()) { 5289 Instruction *Dst = Worklist[Idx++]; 5290 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5291 continue; 5292 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5293 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5294 auto *J = cast<Instruction>(U); 5295 return !TheLoop->contains(J) || Worklist.count(J) || 5296 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5297 isScalarUse(J, Src)); 5298 })) { 5299 Worklist.insert(Src); 5300 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5301 } 5302 } 5303 5304 // An induction variable will remain scalar if all users of the induction 5305 // variable and induction variable update remain scalar. 5306 for (auto &Induction : Legal->getInductionVars()) { 5307 auto *Ind = Induction.first; 5308 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5309 5310 // If tail-folding is applied, the primary induction variable will be used 5311 // to feed a vector compare. 5312 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5313 continue; 5314 5315 // Determine if all users of the induction variable are scalar after 5316 // vectorization. 5317 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5318 auto *I = cast<Instruction>(U); 5319 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5320 }); 5321 if (!ScalarInd) 5322 continue; 5323 5324 // Determine if all users of the induction variable update instruction are 5325 // scalar after vectorization. 5326 auto ScalarIndUpdate = 5327 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5328 auto *I = cast<Instruction>(U); 5329 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5330 }); 5331 if (!ScalarIndUpdate) 5332 continue; 5333 5334 // The induction variable and its update instruction will remain scalar. 
5335 Worklist.insert(Ind); 5336 Worklist.insert(IndUpdate); 5337 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5338 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5339 << "\n"); 5340 } 5341 5342 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5343 } 5344 5345 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 5346 if (!blockNeedsPredication(I->getParent())) 5347 return false; 5348 switch(I->getOpcode()) { 5349 default: 5350 break; 5351 case Instruction::Load: 5352 case Instruction::Store: { 5353 if (!Legal->isMaskRequired(I)) 5354 return false; 5355 auto *Ptr = getLoadStorePointerOperand(I); 5356 auto *Ty = getLoadStoreType(I); 5357 const Align Alignment = getLoadStoreAlignment(I); 5358 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5359 TTI.isLegalMaskedGather(Ty, Alignment)) 5360 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5361 TTI.isLegalMaskedScatter(Ty, Alignment)); 5362 } 5363 case Instruction::UDiv: 5364 case Instruction::SDiv: 5365 case Instruction::SRem: 5366 case Instruction::URem: 5367 return mayDivideByZero(*I); 5368 } 5369 return false; 5370 } 5371 5372 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5373 Instruction *I, ElementCount VF) { 5374 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5375 assert(getWideningDecision(I, VF) == CM_Unknown && 5376 "Decision should not be set yet."); 5377 auto *Group = getInterleavedAccessGroup(I); 5378 assert(Group && "Must have a group."); 5379 5380 // If the instruction's allocated size doesn't equal it's type size, it 5381 // requires padding and will be scalarized. 5382 auto &DL = I->getModule()->getDataLayout(); 5383 auto *ScalarTy = getLoadStoreType(I); 5384 if (hasIrregularType(ScalarTy, DL)) 5385 return false; 5386 5387 // Check if masking is required. 5388 // A Group may need masking for one of two reasons: it resides in a block that 5389 // needs predication, or it was decided to use masking to deal with gaps. 5390 bool PredicatedAccessRequiresMasking = 5391 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5392 bool AccessWithGapsRequiresMasking = 5393 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5394 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 5395 return true; 5396 5397 // If masked interleaving is required, we expect that the user/target had 5398 // enabled it, because otherwise it either wouldn't have been created or 5399 // it should have been invalidated by the CostModel. 5400 assert(useMaskedInterleavedAccesses(TTI) && 5401 "Masked interleave-groups for predicated accesses are not enabled."); 5402 5403 auto *Ty = getLoadStoreType(I); 5404 const Align Alignment = getLoadStoreAlignment(I); 5405 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5406 : TTI.isLegalMaskedStore(Ty, Alignment); 5407 } 5408 5409 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5410 Instruction *I, ElementCount VF) { 5411 // Get and ensure we have a valid memory instruction. 5412 LoadInst *LI = dyn_cast<LoadInst>(I); 5413 StoreInst *SI = dyn_cast<StoreInst>(I); 5414 assert((LI || SI) && "Invalid memory instruction"); 5415 5416 auto *Ptr = getLoadStorePointerOperand(I); 5417 5418 // In order to be widened, the pointer should be consecutive, first of all. 
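// For example (illustrative): a load of a[i] has a consecutive pointer and can
// be widened into a single wide load, whereas a load of a[2*i] does not and
// would instead be scalarized or turned into a gather.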
5419 if (!Legal->isConsecutivePtr(Ptr)) 5420 return false; 5421 5422 // If the instruction is a store located in a predicated block, it will be 5423 // scalarized. 5424 if (isScalarWithPredication(I)) 5425 return false; 5426 5427 // If the instruction's allocated size doesn't equal its type size, it 5428 // requires padding and will be scalarized. 5429 auto &DL = I->getModule()->getDataLayout(); 5430 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5431 if (hasIrregularType(ScalarTy, DL)) 5432 return false; 5433 5434 return true; 5435 } 5436 5437 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5438 // We should not collect Uniforms more than once per VF. Right now, 5439 // this function is called from collectUniformsAndScalars(), which 5440 // already does this check. Collecting Uniforms for VF=1 does not make any 5441 // sense. 5442 5443 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5444 "This function should not be visited twice for the same VF"); 5445 5446 // Initialize the entry for this VF up front; even if no uniform value is 5447 // found, we will not analyze this VF again, since Uniforms.count(VF) will return 1. 5448 Uniforms[VF].clear(); 5449 5450 // We now know that the loop is vectorizable! 5451 // Collect instructions inside the loop that will remain uniform after 5452 // vectorization. 5453 5454 // Global values, params and instructions outside of the current loop are out of 5455 // scope. 5456 auto isOutOfScope = [&](Value *V) -> bool { 5457 Instruction *I = dyn_cast<Instruction>(V); 5458 return (!I || !TheLoop->contains(I)); 5459 }; 5460 5461 SetVector<Instruction *> Worklist; 5462 BasicBlock *Latch = TheLoop->getLoopLatch(); 5463 5464 // Instructions that are scalar with predication must not be considered 5465 // uniform after vectorization, because that would create an erroneous 5466 // replicating region where only a single instance out of VF should be formed. 5467 // TODO: optimize such seldom cases if found important, see PR40816. 5468 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5469 if (isOutOfScope(I)) { 5470 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5471 << *I << "\n"); 5472 return; 5473 } 5474 if (isScalarWithPredication(I)) { 5475 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5476 << *I << "\n"); 5477 return; 5478 } 5479 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5480 Worklist.insert(I); 5481 }; 5482 5483 // Start with the conditional branch. If the branch condition is an 5484 // instruction contained in the loop that is only used by the branch, it is 5485 // uniform. 5486 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5487 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5488 addToWorklistIfAllowed(Cmp); 5489 5490 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5491 InstWidening WideningDecision = getWideningDecision(I, VF); 5492 assert(WideningDecision != CM_Unknown && 5493 "Widening decision should be ready at this moment"); 5494 5495 // A uniform memory op is itself uniform. We exclude uniform stores 5496 // here as they demand the last lane, not the first one.
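// For example (illustrative): a load from a loop-invariant address only needs
// lane 0 and is therefore uniform, whereas a store to a loop-invariant address
// must keep the value of the final iteration, i.e. the last lane.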
5497 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5498 assert(WideningDecision == CM_Scalarize); 5499 return true; 5500 } 5501 5502 return (WideningDecision == CM_Widen || 5503 WideningDecision == CM_Widen_Reverse || 5504 WideningDecision == CM_Interleave); 5505 }; 5506 5507 5508 // Returns true if Ptr is the pointer operand of a memory access instruction 5509 // I, and I is known to not require scalarization. 5510 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5511 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5512 }; 5513 5514 // Holds a list of values which are known to have at least one uniform use. 5515 // Note that there may be other uses which aren't uniform. A "uniform use" 5516 // here is something which only demands lane 0 of the unrolled iterations; 5517 // it does not imply that all lanes produce the same value (e.g. this is not 5518 // the usual meaning of uniform) 5519 SetVector<Value *> HasUniformUse; 5520 5521 // Scan the loop for instructions which are either a) known to have only 5522 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5523 for (auto *BB : TheLoop->blocks()) 5524 for (auto &I : *BB) { 5525 // If there's no pointer operand, there's nothing to do. 5526 auto *Ptr = getLoadStorePointerOperand(&I); 5527 if (!Ptr) 5528 continue; 5529 5530 // A uniform memory op is itself uniform. We exclude uniform stores 5531 // here as they demand the last lane, not the first one. 5532 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5533 addToWorklistIfAllowed(&I); 5534 5535 if (isUniformDecision(&I, VF)) { 5536 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5537 HasUniformUse.insert(Ptr); 5538 } 5539 } 5540 5541 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5542 // demanding) users. Since loops are assumed to be in LCSSA form, this 5543 // disallows uses outside the loop as well. 5544 for (auto *V : HasUniformUse) { 5545 if (isOutOfScope(V)) 5546 continue; 5547 auto *I = cast<Instruction>(V); 5548 auto UsersAreMemAccesses = 5549 llvm::all_of(I->users(), [&](User *U) -> bool { 5550 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5551 }); 5552 if (UsersAreMemAccesses) 5553 addToWorklistIfAllowed(I); 5554 } 5555 5556 // Expand Worklist in topological order: whenever a new instruction 5557 // is added , its users should be already inside Worklist. It ensures 5558 // a uniform instruction will only be used by uniform instructions. 5559 unsigned idx = 0; 5560 while (idx != Worklist.size()) { 5561 Instruction *I = Worklist[idx++]; 5562 5563 for (auto OV : I->operand_values()) { 5564 // isOutOfScope operands cannot be uniform instructions. 5565 if (isOutOfScope(OV)) 5566 continue; 5567 // First order recurrence Phi's should typically be considered 5568 // non-uniform. 5569 auto *OP = dyn_cast<PHINode>(OV); 5570 if (OP && Legal->isFirstOrderRecurrence(OP)) 5571 continue; 5572 // If all the users of the operand are uniform, then add the 5573 // operand into the uniform worklist. 5574 auto *OI = cast<Instruction>(OV); 5575 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5576 auto *J = cast<Instruction>(U); 5577 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5578 })) 5579 addToWorklistIfAllowed(OI); 5580 } 5581 } 5582 5583 // For an instruction to be added into Worklist above, all its users inside 5584 // the loop should also be in Worklist. 
However, this condition cannot be 5585 // true for phi nodes that form a cyclic dependence. We must process phi 5586 // nodes separately. An induction variable will remain uniform if all users 5587 // of the induction variable and induction variable update remain uniform. 5588 // The code below handles both pointer and non-pointer induction variables. 5589 for (auto &Induction : Legal->getInductionVars()) { 5590 auto *Ind = Induction.first; 5591 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5592 5593 // Determine if all users of the induction variable are uniform after 5594 // vectorization. 5595 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5596 auto *I = cast<Instruction>(U); 5597 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5598 isVectorizedMemAccessUse(I, Ind); 5599 }); 5600 if (!UniformInd) 5601 continue; 5602 5603 // Determine if all users of the induction variable update instruction are 5604 // uniform after vectorization. 5605 auto UniformIndUpdate = 5606 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5607 auto *I = cast<Instruction>(U); 5608 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5609 isVectorizedMemAccessUse(I, IndUpdate); 5610 }); 5611 if (!UniformIndUpdate) 5612 continue; 5613 5614 // The induction variable and its update instruction will remain uniform. 5615 addToWorklistIfAllowed(Ind); 5616 addToWorklistIfAllowed(IndUpdate); 5617 } 5618 5619 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5620 } 5621 5622 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5623 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5624 5625 if (Legal->getRuntimePointerChecking()->Need) { 5626 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5627 "runtime pointer checks needed. Enable vectorization of this " 5628 "loop with '#pragma clang loop vectorize(enable)' when " 5629 "compiling with -Os/-Oz", 5630 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5631 return true; 5632 } 5633 5634 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5635 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5636 "runtime SCEV checks needed. Enable vectorization of this " 5637 "loop with '#pragma clang loop vectorize(enable)' when " 5638 "compiling with -Os/-Oz", 5639 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5640 return true; 5641 } 5642 5643 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5644 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5645 reportVectorizationFailure("Runtime stride check for small trip count", 5646 "runtime stride == 1 checks needed. 
Enable vectorization of " 5647 "this loop without such check by compiling with -Os/-Oz", 5648 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5649 return true; 5650 } 5651 5652 return false; 5653 } 5654 5655 ElementCount 5656 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5657 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5658 reportVectorizationInfo( 5659 "Disabling scalable vectorization, because target does not " 5660 "support scalable vectors.", 5661 "ScalableVectorsUnsupported", ORE, TheLoop); 5662 return ElementCount::getScalable(0); 5663 } 5664 5665 if (Hints->isScalableVectorizationDisabled()) { 5666 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5667 "ScalableVectorizationDisabled", ORE, TheLoop); 5668 return ElementCount::getScalable(0); 5669 } 5670 5671 auto MaxScalableVF = ElementCount::getScalable( 5672 std::numeric_limits<ElementCount::ScalarTy>::max()); 5673 5674 // Disable scalable vectorization if the loop contains unsupported reductions. 5675 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5676 // FIXME: While for scalable vectors this is currently sufficient, this should 5677 // be replaced by a more detailed mechanism that filters out specific VFs, 5678 // instead of invalidating vectorization for a whole set of VFs based on the 5679 // MaxVF. 5680 if (!canVectorizeReductions(MaxScalableVF)) { 5681 reportVectorizationInfo( 5682 "Scalable vectorization not supported for the reduction " 5683 "operations found in this loop.", 5684 "ScalableVFUnfeasible", ORE, TheLoop); 5685 return ElementCount::getScalable(0); 5686 } 5687 5688 if (Legal->isSafeForAnyVectorWidth()) 5689 return MaxScalableVF; 5690 5691 // Limit MaxScalableVF by the maximum safe dependence distance. 5692 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5693 MaxScalableVF = ElementCount::getScalable( 5694 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5695 if (!MaxScalableVF) 5696 reportVectorizationInfo( 5697 "Max legal vector width too small, scalable vectorization " 5698 "unfeasible.", 5699 "ScalableVFUnfeasible", ORE, TheLoop); 5700 5701 return MaxScalableVF; 5702 } 5703 5704 FixedScalableVFPair 5705 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5706 ElementCount UserVF) { 5707 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5708 unsigned SmallestType, WidestType; 5709 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5710 5711 // Get the maximum safe dependence distance in bits computed by LAA. 5712 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5713 // the memory accesses that is most restrictive (involved in the smallest 5714 // dependence distance). 5715 unsigned MaxSafeElements = 5716 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5717 5718 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5719 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5720 5721 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5722 << ".\n"); 5723 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5724 << ".\n"); 5725 5726 // First analyze the UserVF, fall back if the UserVF should be ignored. 5727 if (UserVF) { 5728 auto MaxSafeUserVF = 5729 UserVF.isScalable() ? 
MaxSafeScalableVF : MaxSafeFixedVF; 5730 5731 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) 5732 return UserVF; 5733 5734 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5735 5736 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5737 // is better to ignore the hint and let the compiler choose a suitable VF. 5738 if (!UserVF.isScalable()) { 5739 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5740 << " is unsafe, clamping to max safe VF=" 5741 << MaxSafeFixedVF << ".\n"); 5742 ORE->emit([&]() { 5743 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5744 TheLoop->getStartLoc(), 5745 TheLoop->getHeader()) 5746 << "User-specified vectorization factor " 5747 << ore::NV("UserVectorizationFactor", UserVF) 5748 << " is unsafe, clamping to maximum safe vectorization factor " 5749 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5750 }); 5751 return MaxSafeFixedVF; 5752 } 5753 5754 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5755 << " is unsafe. Ignoring scalable UserVF.\n"); 5756 ORE->emit([&]() { 5757 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5758 TheLoop->getStartLoc(), 5759 TheLoop->getHeader()) 5760 << "User-specified vectorization factor " 5761 << ore::NV("UserVectorizationFactor", UserVF) 5762 << " is unsafe. Ignoring the hint to let the compiler pick a " 5763 "suitable VF."; 5764 }); 5765 } 5766 5767 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5768 << " / " << WidestType << " bits.\n"); 5769 5770 FixedScalableVFPair Result(ElementCount::getFixed(1), 5771 ElementCount::getScalable(0)); 5772 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5773 WidestType, MaxSafeFixedVF)) 5774 Result.FixedVF = MaxVF; 5775 5776 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5777 WidestType, MaxSafeScalableVF)) 5778 if (MaxVF.isScalable()) { 5779 Result.ScalableVF = MaxVF; 5780 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5781 << "\n"); 5782 } 5783 5784 return Result; 5785 } 5786 5787 FixedScalableVFPair 5788 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5789 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5790 // TODO: It may by useful to do since it's still likely to be dynamically 5791 // uniform if the target can skip. 5792 reportVectorizationFailure( 5793 "Not inserting runtime ptr check for divergent target", 5794 "runtime pointer checks needed. 
Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return FixedScalableVFPair::getNone();

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return FixedScalableVFPair::getNone();
  }

  // Now try the tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we choose.
  // FIXME: The condition below pessimises the case for fixed-width vectors,
  // when scalable VFs are also candidates for vectorization.
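  // Worked example for the check below: with a known trip count of 128, a
  // maximum fixed VF of 8 and a user interleave count of 2, MaxVFtimesIC is
  // 16 and 128 urem 16 == 0, so no scalar tail remains and tail folding can
  // be skipped.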
  if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
    ElementCount MaxFixedVF = MaxFactors.FixedVF;
    assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
           "MaxFixedVF must be a power of 2");
    unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
                                   : MaxFixedVF.getFixedValue();
    ScalarEvolution *SE = PSE.getSE();
    const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
    const SCEV *ExitCount = SE->getAddExpr(
        BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
    const SCEV *Rem = SE->getURemExpr(
        SE->applyLoopGuards(ExitCount, TheLoop),
        SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
    if (Rem->isZero()) {
      // Accept MaxFixedVF if we do not have a tail.
      LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
      return MaxFactors;
    }
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxFactors;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxFactors;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return FixedScalableVFPair::getNone();
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return FixedScalableVFPair::getNone();
}

ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
    unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
    const ElementCount &MaxSafeVF) {
  bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
  TypeSize WidestRegister = TTI.getRegisterBitWidth(
      ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
                           : TargetTransformInfo::RGK_FixedWidthVector);

  // Convenience function to return the minimum of two ElementCounts.
  auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
    assert((LHS.isScalable() == RHS.isScalable()) &&
           "Scalable flags must match");
    return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
  };

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
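  // Worked example for the computation below: with a 256-bit vector register
  // and a widest scalar type of 32 bits, PowerOf2Floor(256 / 32) == 8, so the
  // initial MaxVectorElementCount is 8 lanes before it is clamped by
  // MaxSafeVF.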
5947 auto MaxVectorElementCount = ElementCount::get( 5948 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5949 ComputeScalableMaxVF); 5950 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5951 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5952 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5953 5954 if (!MaxVectorElementCount) { 5955 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5956 return ElementCount::getFixed(1); 5957 } 5958 5959 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5960 if (ConstTripCount && 5961 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5962 isPowerOf2_32(ConstTripCount)) { 5963 // We need to clamp the VF to be the ConstTripCount. There is no point in 5964 // choosing a higher viable VF as done in the loop below. If 5965 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5966 // the TC is less than or equal to the known number of lanes. 5967 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5968 << ConstTripCount << "\n"); 5969 return TripCountEC; 5970 } 5971 5972 ElementCount MaxVF = MaxVectorElementCount; 5973 if (TTI.shouldMaximizeVectorBandwidth() || 5974 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5975 auto MaxVectorElementCountMaxBW = ElementCount::get( 5976 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5977 ComputeScalableMaxVF); 5978 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5979 5980 // Collect all viable vectorization factors larger than the default MaxVF 5981 // (i.e. MaxVectorElementCount). 5982 SmallVector<ElementCount, 8> VFs; 5983 for (ElementCount VS = MaxVectorElementCount * 2; 5984 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5985 VFs.push_back(VS); 5986 5987 // For each VF calculate its register usage. 5988 auto RUs = calculateRegisterUsage(VFs); 5989 5990 // Select the largest VF which doesn't require more registers than existing 5991 // ones. 5992 for (int i = RUs.size() - 1; i >= 0; --i) { 5993 bool Selected = true; 5994 for (auto &pair : RUs[i].MaxLocalUsers) { 5995 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5996 if (pair.second > TargetNumRegisters) 5997 Selected = false; 5998 } 5999 if (Selected) { 6000 MaxVF = VFs[i]; 6001 break; 6002 } 6003 } 6004 if (ElementCount MinVF = 6005 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 6006 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 6007 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 6008 << ") with target's minimum: " << MinVF << '\n'); 6009 MaxVF = MinVF; 6010 } 6011 } 6012 } 6013 return MaxVF; 6014 } 6015 6016 bool LoopVectorizationCostModel::isMoreProfitable( 6017 const VectorizationFactor &A, const VectorizationFactor &B) const { 6018 InstructionCost::CostType CostA = *A.Cost.getValue(); 6019 InstructionCost::CostType CostB = *B.Cost.getValue(); 6020 6021 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 6022 6023 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 6024 MaxTripCount) { 6025 // If we are folding the tail and the trip count is a known (possibly small) 6026 // constant, the trip count will be rounded up to an integer number of 6027 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 6028 // which we compare directly. 
When not folding the tail, the total cost will 6029 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 6030 // approximated with the per-lane cost below instead of using the tripcount 6031 // as here. 6032 int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 6033 int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 6034 return RTCostA < RTCostB; 6035 } 6036 6037 // When set to preferred, for now assume vscale may be larger than 1, so 6038 // that scalable vectorization is slightly favorable over fixed-width 6039 // vectorization. 6040 if (Hints->isScalableVectorizationPreferred()) 6041 if (A.Width.isScalable() && !B.Width.isScalable()) 6042 return (CostA * B.Width.getKnownMinValue()) <= 6043 (CostB * A.Width.getKnownMinValue()); 6044 6045 // To avoid the need for FP division: 6046 // (CostA / A.Width) < (CostB / B.Width) 6047 // <=> (CostA * B.Width) < (CostB * A.Width) 6048 return (CostA * B.Width.getKnownMinValue()) < 6049 (CostB * A.Width.getKnownMinValue()); 6050 } 6051 6052 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 6053 const ElementCountSet &VFCandidates) { 6054 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 6055 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 6056 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 6057 assert(VFCandidates.count(ElementCount::getFixed(1)) && 6058 "Expected Scalar VF to be a candidate"); 6059 6060 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 6061 VectorizationFactor ChosenFactor = ScalarCost; 6062 6063 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6064 if (ForceVectorization && VFCandidates.size() > 1) { 6065 // Ignore scalar width, because the user explicitly wants vectorization. 6066 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 6067 // evaluation. 6068 ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max(); 6069 } 6070 6071 for (const auto &i : VFCandidates) { 6072 // The cost for scalar VF=1 is already calculated, so ignore it. 6073 if (i.isScalar()) 6074 continue; 6075 6076 // Notice that the vector loop needs to be executed less times, so 6077 // we need to divide the cost of the vector loops by the width of 6078 // the vector elements. 6079 VectorizationCostTy C = expectedCost(i); 6080 6081 assert(C.first.isValid() && "Unexpected invalid cost for vector loop"); 6082 VectorizationFactor Candidate(i, C.first); 6083 LLVM_DEBUG( 6084 dbgs() << "LV: Vector loop of width " << i << " costs: " 6085 << (*Candidate.Cost.getValue() / 6086 Candidate.Width.getKnownMinValue()) 6087 << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") 6088 << ".\n"); 6089 6090 if (!C.second && !ForceVectorization) { 6091 LLVM_DEBUG( 6092 dbgs() << "LV: Not considering vector loop of width " << i 6093 << " because it will not generate any vector instructions.\n"); 6094 continue; 6095 } 6096 6097 // If profitable add it to ProfitableVF list. 
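    // For example, a candidate with cost 16 at VF 8 (a per-lane cost of 2)
    // beats a scalar cost of 10 at VF 1 in the comparison below, since
    // isMoreProfitable checks 16 * 1 < 10 * 8.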
6098 if (isMoreProfitable(Candidate, ScalarCost)) 6099 ProfitableVFs.push_back(Candidate); 6100 6101 if (isMoreProfitable(Candidate, ChosenFactor)) 6102 ChosenFactor = Candidate; 6103 } 6104 6105 if (!EnableCondStoresVectorization && NumPredStores) { 6106 reportVectorizationFailure("There are conditional stores.", 6107 "store that is conditionally executed prevents vectorization", 6108 "ConditionalStore", ORE, TheLoop); 6109 ChosenFactor = ScalarCost; 6110 } 6111 6112 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 6113 *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue()) 6114 dbgs() 6115 << "LV: Vectorization seems to be not beneficial, " 6116 << "but was forced by a user.\n"); 6117 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 6118 return ChosenFactor; 6119 } 6120 6121 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 6122 const Loop &L, ElementCount VF) const { 6123 // Cross iteration phis such as reductions need special handling and are 6124 // currently unsupported. 6125 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 6126 return Legal->isFirstOrderRecurrence(&Phi) || 6127 Legal->isReductionVariable(&Phi); 6128 })) 6129 return false; 6130 6131 // Phis with uses outside of the loop require special handling and are 6132 // currently unsupported. 6133 for (auto &Entry : Legal->getInductionVars()) { 6134 // Look for uses of the value of the induction at the last iteration. 6135 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 6136 for (User *U : PostInc->users()) 6137 if (!L.contains(cast<Instruction>(U))) 6138 return false; 6139 // Look for uses of penultimate value of the induction. 6140 for (User *U : Entry.first->users()) 6141 if (!L.contains(cast<Instruction>(U))) 6142 return false; 6143 } 6144 6145 // Induction variables that are widened require special handling that is 6146 // currently not supported. 6147 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 6148 return !(this->isScalarAfterVectorization(Entry.first, VF) || 6149 this->isProfitableToScalarize(Entry.first, VF)); 6150 })) 6151 return false; 6152 6153 return true; 6154 } 6155 6156 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 6157 const ElementCount VF) const { 6158 // FIXME: We need a much better cost-model to take different parameters such 6159 // as register pressure, code size increase and cost of extra branches into 6160 // account. For now we apply a very crude heuristic and only consider loops 6161 // with vectorization factors larger than a certain value. 6162 // We also consider epilogue vectorization unprofitable for targets that don't 6163 // consider interleaving beneficial (eg. MVE). 
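  // For example, a target reporting a maximum interleave factor of 1 is
  // rejected by the first check below; otherwise the main-loop VF must reach
  // EpilogueVectorizationMinVF to be considered profitable.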
6164 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 6165 return false; 6166 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 6167 return true; 6168 return false; 6169 } 6170 6171 VectorizationFactor 6172 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 6173 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 6174 VectorizationFactor Result = VectorizationFactor::Disabled(); 6175 if (!EnableEpilogueVectorization) { 6176 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 6177 return Result; 6178 } 6179 6180 if (!isScalarEpilogueAllowed()) { 6181 LLVM_DEBUG( 6182 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 6183 "allowed.\n";); 6184 return Result; 6185 } 6186 6187 // FIXME: This can be fixed for scalable vectors later, because at this stage 6188 // the LoopVectorizer will only consider vectorizing a loop with scalable 6189 // vectors when the loop has a hint to enable vectorization for a given VF. 6190 if (MainLoopVF.isScalable()) { 6191 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " 6192 "yet supported.\n"); 6193 return Result; 6194 } 6195 6196 // Not really a cost consideration, but check for unsupported cases here to 6197 // simplify the logic. 6198 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6199 LLVM_DEBUG( 6200 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6201 "not a supported candidate.\n";); 6202 return Result; 6203 } 6204 6205 if (EpilogueVectorizationForceVF > 1) { 6206 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6207 if (LVP.hasPlanWithVFs( 6208 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 6209 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 6210 else { 6211 LLVM_DEBUG( 6212 dbgs() 6213 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6214 return Result; 6215 } 6216 } 6217 6218 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6219 TheLoop->getHeader()->getParent()->hasMinSize()) { 6220 LLVM_DEBUG( 6221 dbgs() 6222 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6223 return Result; 6224 } 6225 6226 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 6227 return Result; 6228 6229 for (auto &NextVF : ProfitableVFs) 6230 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 6231 (Result.Width.getFixedValue() == 1 || 6232 isMoreProfitable(NextVF, Result)) && 6233 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 6234 Result = NextVF; 6235 6236 if (Result != VectorizationFactor::Disabled()) 6237 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6238 << Result.Width.getFixedValue() << "\n";); 6239 return Result; 6240 } 6241 6242 std::pair<unsigned, unsigned> 6243 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6244 unsigned MinWidth = -1U; 6245 unsigned MaxWidth = 8; 6246 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6247 6248 // For each block. 6249 for (BasicBlock *BB : TheLoop->blocks()) { 6250 // For each instruction in the loop. 6251 for (Instruction &I : BB->instructionsWithoutDebug()) { 6252 Type *T = I.getType(); 6253 6254 // Skip ignored values. 6255 if (ValuesToIgnore.count(&I)) 6256 continue; 6257 6258 // Only examine Loads, Stores and PHINodes. 6259 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6260 continue; 6261 6262 // Examine PHI nodes that are reduction variables. Update the type to 6263 // account for the recurrence type. 
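      // Illustrative example: a reduction phi declared as i32 whose
      // recurrence has been narrowed to i8 is counted as 8 bits wide here,
      // which can allow a larger maximum VF to be chosen.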
6264 if (auto *PN = dyn_cast<PHINode>(&I)) { 6265 if (!Legal->isReductionVariable(PN)) 6266 continue; 6267 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN]; 6268 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6269 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6270 RdxDesc.getRecurrenceType(), 6271 TargetTransformInfo::ReductionFlags())) 6272 continue; 6273 T = RdxDesc.getRecurrenceType(); 6274 } 6275 6276 // Examine the stored values. 6277 if (auto *ST = dyn_cast<StoreInst>(&I)) 6278 T = ST->getValueOperand()->getType(); 6279 6280 // Ignore loaded pointer types and stored pointer types that are not 6281 // vectorizable. 6282 // 6283 // FIXME: The check here attempts to predict whether a load or store will 6284 // be vectorized. We only know this for certain after a VF has 6285 // been selected. Here, we assume that if an access can be 6286 // vectorized, it will be. We should also look at extending this 6287 // optimization to non-pointer types. 6288 // 6289 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6290 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6291 continue; 6292 6293 MinWidth = std::min(MinWidth, 6294 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6295 MaxWidth = std::max(MaxWidth, 6296 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6297 } 6298 } 6299 6300 return {MinWidth, MaxWidth}; 6301 } 6302 6303 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6304 unsigned LoopCost) { 6305 // -- The interleave heuristics -- 6306 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6307 // There are many micro-architectural considerations that we can't predict 6308 // at this level. For example, frontend pressure (on decode or fetch) due to 6309 // code size, or the number and capabilities of the execution ports. 6310 // 6311 // We use the following heuristics to select the interleave count: 6312 // 1. If the code has reductions, then we interleave to break the cross 6313 // iteration dependency. 6314 // 2. If the loop is really small, then we interleave to reduce the loop 6315 // overhead. 6316 // 3. We don't interleave if we think that we will spill registers to memory 6317 // due to the increased register pressure. 6318 6319 if (!isScalarEpilogueAllowed()) 6320 return 1; 6321 6322 // We used the distance for the interleave count. 6323 if (Legal->getMaxSafeDepDistBytes() != -1U) 6324 return 1; 6325 6326 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6327 const bool HasReductions = !Legal->getReductionVars().empty(); 6328 // Do not interleave loops with a relatively small known or estimated trip 6329 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6330 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6331 // because with the above conditions interleaving can expose ILP and break 6332 // cross iteration dependences for reductions. 6333 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6334 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6335 return 1; 6336 6337 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6338 // We divide by these constants so assume that we have at least one 6339 // instruction that uses at least one register. 6340 for (auto& pair : R.MaxLocalUsers) { 6341 pair.second = std::max(pair.second, 1U); 6342 } 6343 6344 // We calculate the interleave count using the following formula. 
6345 // Subtract the number of loop invariants from the number of available 6346 // registers. These registers are used by all of the interleaved instances. 6347 // Next, divide the remaining registers by the number of registers that is 6348 // required by the loop, in order to estimate how many parallel instances 6349 // fit without causing spills. All of this is rounded down if necessary to be 6350 // a power of two. We want power of two interleave count to simplify any 6351 // addressing operations or alignment considerations. 6352 // We also want power of two interleave counts to ensure that the induction 6353 // variable of the vector loop wraps to zero, when tail is folded by masking; 6354 // this currently happens when OptForSize, in which case IC is set to 1 above. 6355 unsigned IC = UINT_MAX; 6356 6357 for (auto& pair : R.MaxLocalUsers) { 6358 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6359 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6360 << " registers of " 6361 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6362 if (VF.isScalar()) { 6363 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6364 TargetNumRegisters = ForceTargetNumScalarRegs; 6365 } else { 6366 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6367 TargetNumRegisters = ForceTargetNumVectorRegs; 6368 } 6369 unsigned MaxLocalUsers = pair.second; 6370 unsigned LoopInvariantRegs = 0; 6371 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6372 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6373 6374 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6375 // Don't count the induction variable as interleaved. 6376 if (EnableIndVarRegisterHeur) { 6377 TmpIC = 6378 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6379 std::max(1U, (MaxLocalUsers - 1))); 6380 } 6381 6382 IC = std::min(IC, TmpIC); 6383 } 6384 6385 // Clamp the interleave ranges to reasonable counts. 6386 unsigned MaxInterleaveCount = 6387 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6388 6389 // Check if the user has overridden the max. 6390 if (VF.isScalar()) { 6391 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6392 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6393 } else { 6394 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6395 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6396 } 6397 6398 // If trip count is known or estimated compile time constant, limit the 6399 // interleave count to be less than the trip count divided by VF, provided it 6400 // is at least 1. 6401 // 6402 // For scalable vectors we can't know if interleaving is beneficial. It may 6403 // not be beneficial for small loops if none of the lanes in the second vector 6404 // iterations is enabled. However, for larger loops, there is likely to be a 6405 // similar benefit as for fixed-width vectors. For now, we choose to leave 6406 // the InterleaveCount as if vscale is '1', although if some information about 6407 // the vector is known (e.g. min vector size), we can make a better decision. 6408 if (BestKnownTC) { 6409 MaxInterleaveCount = 6410 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6411 // Make sure MaxInterleaveCount is greater than 0. 
6412 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6413 } 6414 6415 assert(MaxInterleaveCount > 0 && 6416 "Maximum interleave count must be greater than 0"); 6417 6418 // Clamp the calculated IC to be between the 1 and the max interleave count 6419 // that the target and trip count allows. 6420 if (IC > MaxInterleaveCount) 6421 IC = MaxInterleaveCount; 6422 else 6423 // Make sure IC is greater than 0. 6424 IC = std::max(1u, IC); 6425 6426 assert(IC > 0 && "Interleave count must be greater than 0."); 6427 6428 // If we did not calculate the cost for VF (because the user selected the VF) 6429 // then we calculate the cost of VF here. 6430 if (LoopCost == 0) { 6431 assert(expectedCost(VF).first.isValid() && "Expected a valid cost"); 6432 LoopCost = *expectedCost(VF).first.getValue(); 6433 } 6434 6435 assert(LoopCost && "Non-zero loop cost expected"); 6436 6437 // Interleave if we vectorized this loop and there is a reduction that could 6438 // benefit from interleaving. 6439 if (VF.isVector() && HasReductions) { 6440 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6441 return IC; 6442 } 6443 6444 // Note that if we've already vectorized the loop we will have done the 6445 // runtime check and so interleaving won't require further checks. 6446 bool InterleavingRequiresRuntimePointerCheck = 6447 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6448 6449 // We want to interleave small loops in order to reduce the loop overhead and 6450 // potentially expose ILP opportunities. 6451 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6452 << "LV: IC is " << IC << '\n' 6453 << "LV: VF is " << VF << '\n'); 6454 const bool AggressivelyInterleaveReductions = 6455 TTI.enableAggressiveInterleaving(HasReductions); 6456 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6457 // We assume that the cost overhead is 1 and we use the cost model 6458 // to estimate the cost of the loop and interleave until the cost of the 6459 // loop overhead is about 5% of the cost of the loop. 6460 unsigned SmallIC = 6461 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6462 6463 // Interleave until store/load ports (estimated by max interleave count) are 6464 // saturated. 6465 unsigned NumStores = Legal->getNumStores(); 6466 unsigned NumLoads = Legal->getNumLoads(); 6467 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6468 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6469 6470 // If we have a scalar reduction (vector reductions are already dealt with 6471 // by this point), we can increase the critical path length if the loop 6472 // we're interleaving is inside another loop. Limit, by default to 2, so the 6473 // critical path only gets increased by one reduction operation. 6474 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6475 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6476 SmallIC = std::min(SmallIC, F); 6477 StoresIC = std::min(StoresIC, F); 6478 LoadsIC = std::min(LoadsIC, F); 6479 } 6480 6481 if (EnableLoadStoreRuntimeInterleave && 6482 std::max(StoresIC, LoadsIC) > SmallIC) { 6483 LLVM_DEBUG( 6484 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6485 return std::max(StoresIC, LoadsIC); 6486 } 6487 6488 // If there are scalar reductions and TTI has enabled aggressive 6489 // interleaving for reductions, we will interleave to expose ILP. 
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal IC
      // to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
6565 if (!TheLoop->contains(Instr)) { 6566 LoopInvariants.insert(Instr); 6567 continue; 6568 } 6569 6570 // Overwrite previous end points. 6571 EndPoint[Instr] = IdxToInstr.size(); 6572 Ends.insert(Instr); 6573 } 6574 } 6575 } 6576 6577 // Saves the list of intervals that end with the index in 'key'. 6578 using InstrList = SmallVector<Instruction *, 2>; 6579 DenseMap<unsigned, InstrList> TransposeEnds; 6580 6581 // Transpose the EndPoints to a list of values that end at each index. 6582 for (auto &Interval : EndPoint) 6583 TransposeEnds[Interval.second].push_back(Interval.first); 6584 6585 SmallPtrSet<Instruction *, 8> OpenIntervals; 6586 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6587 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6588 6589 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6590 6591 // A lambda that gets the register usage for the given type and VF. 6592 const auto &TTICapture = TTI; 6593 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) { 6594 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6595 return 0; 6596 return *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6597 }; 6598 6599 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6600 Instruction *I = IdxToInstr[i]; 6601 6602 // Remove all of the instructions that end at this location. 6603 InstrList &List = TransposeEnds[i]; 6604 for (Instruction *ToRemove : List) 6605 OpenIntervals.erase(ToRemove); 6606 6607 // Ignore instructions that are never used within the loop. 6608 if (!Ends.count(I)) 6609 continue; 6610 6611 // Skip ignored values. 6612 if (ValuesToIgnore.count(I)) 6613 continue; 6614 6615 // For each VF find the maximum usage of registers. 6616 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6617 // Count the number of live intervals. 6618 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6619 6620 if (VFs[j].isScalar()) { 6621 for (auto Inst : OpenIntervals) { 6622 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6623 if (RegUsage.find(ClassID) == RegUsage.end()) 6624 RegUsage[ClassID] = 1; 6625 else 6626 RegUsage[ClassID] += 1; 6627 } 6628 } else { 6629 collectUniformsAndScalars(VFs[j]); 6630 for (auto Inst : OpenIntervals) { 6631 // Skip ignored values for VF > 1. 6632 if (VecValuesToIgnore.count(Inst)) 6633 continue; 6634 if (isScalarAfterVectorization(Inst, VFs[j])) { 6635 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6636 if (RegUsage.find(ClassID) == RegUsage.end()) 6637 RegUsage[ClassID] = 1; 6638 else 6639 RegUsage[ClassID] += 1; 6640 } else { 6641 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6642 if (RegUsage.find(ClassID) == RegUsage.end()) 6643 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6644 else 6645 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6646 } 6647 } 6648 } 6649 6650 for (auto& pair : RegUsage) { 6651 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6652 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6653 else 6654 MaxUsages[j][pair.first] = pair.second; 6655 } 6656 } 6657 6658 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6659 << OpenIntervals.size() << '\n'); 6660 6661 // Add the current instruction to the list of open intervals. 
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving "masked load/store" check from legality to cost model.
  // Masked Load/Gather emulation was previously never allowed.
  // Limited number of Masked Store/Scatter emulation was allowed.
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) && NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better to not if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
6746 if (!useEmulatedMaskMemRefHack(&I) && 6747 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6748 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6749 // Remember that BB will remain after vectorization. 6750 PredicatedBBsAfterVectorization.insert(BB); 6751 } 6752 } 6753 } 6754 6755 int LoopVectorizationCostModel::computePredInstDiscount( 6756 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6757 assert(!isUniformAfterVectorization(PredInst, VF) && 6758 "Instruction marked uniform-after-vectorization will be predicated"); 6759 6760 // Initialize the discount to zero, meaning that the scalar version and the 6761 // vector version cost the same. 6762 InstructionCost Discount = 0; 6763 6764 // Holds instructions to analyze. The instructions we visit are mapped in 6765 // ScalarCosts. Those instructions are the ones that would be scalarized if 6766 // we find that the scalar version costs less. 6767 SmallVector<Instruction *, 8> Worklist; 6768 6769 // Returns true if the given instruction can be scalarized. 6770 auto canBeScalarized = [&](Instruction *I) -> bool { 6771 // We only attempt to scalarize instructions forming a single-use chain 6772 // from the original predicated block that would otherwise be vectorized. 6773 // Although not strictly necessary, we give up on instructions we know will 6774 // already be scalar to avoid traversing chains that are unlikely to be 6775 // beneficial. 6776 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6777 isScalarAfterVectorization(I, VF)) 6778 return false; 6779 6780 // If the instruction is scalar with predication, it will be analyzed 6781 // separately. We ignore it within the context of PredInst. 6782 if (isScalarWithPredication(I)) 6783 return false; 6784 6785 // If any of the instruction's operands are uniform after vectorization, 6786 // the instruction cannot be scalarized. This prevents, for example, a 6787 // masked load from being scalarized. 6788 // 6789 // We assume we will only emit a value for lane zero of an instruction 6790 // marked uniform after vectorization, rather than VF identical values. 6791 // Thus, if we scalarize an instruction that uses a uniform, we would 6792 // create uses of values corresponding to the lanes we aren't emitting code 6793 // for. This behavior can be changed by allowing getScalarValue to clone 6794 // the lane zero values for uniforms rather than asserting. 6795 for (Use &U : I->operands()) 6796 if (auto *J = dyn_cast<Instruction>(U.get())) 6797 if (isUniformAfterVectorization(J, VF)) 6798 return false; 6799 6800 // Otherwise, we can scalarize the instruction. 6801 return true; 6802 }; 6803 6804 // Compute the expected cost discount from scalarizing the entire expression 6805 // feeding the predicated instruction. We currently only consider expressions 6806 // that are single-use instruction chains. 6807 Worklist.push_back(PredInst); 6808 while (!Worklist.empty()) { 6809 Instruction *I = Worklist.pop_back_val(); 6810 6811 // If we've already analyzed the instruction, there's nothing to do. 6812 if (ScalarCosts.find(I) != ScalarCosts.end()) 6813 continue; 6814 6815 // Compute the cost of the vector instruction. Note that this cost already 6816 // includes the scalarization overhead of the predicated instruction. 6817 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6818 6819 // Compute the cost of the scalarized instruction. 
This cost is the cost of 6820 // the instruction as if it wasn't if-converted and instead remained in the 6821 // predicated block. We will scale this cost by block probability after 6822 // computing the scalarization overhead. 6823 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6824 InstructionCost ScalarCost = 6825 VF.getKnownMinValue() * 6826 getInstructionCost(I, ElementCount::getFixed(1)).first; 6827 6828 // Compute the scalarization overhead of needed insertelement instructions 6829 // and phi nodes. 6830 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6831 ScalarCost += TTI.getScalarizationOverhead( 6832 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6833 APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); 6834 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6835 ScalarCost += 6836 VF.getKnownMinValue() * 6837 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6838 } 6839 6840 // Compute the scalarization overhead of needed extractelement 6841 // instructions. For each of the instruction's operands, if the operand can 6842 // be scalarized, add it to the worklist; otherwise, account for the 6843 // overhead. 6844 for (Use &U : I->operands()) 6845 if (auto *J = dyn_cast<Instruction>(U.get())) { 6846 assert(VectorType::isValidElementType(J->getType()) && 6847 "Instruction has non-scalar type"); 6848 if (canBeScalarized(J)) 6849 Worklist.push_back(J); 6850 else if (needsExtract(J, VF)) { 6851 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6852 ScalarCost += TTI.getScalarizationOverhead( 6853 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6854 APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); 6855 } 6856 } 6857 6858 // Scale the total scalar cost by block probability. 6859 ScalarCost /= getReciprocalPredBlockProb(); 6860 6861 // Compute the discount. A non-negative discount means the vector version 6862 // of the instruction costs more, and scalarizing would be beneficial. 6863 Discount += VectorCost - ScalarCost; 6864 ScalarCosts[I] = ScalarCost; 6865 } 6866 6867 return *Discount.getValue(); 6868 } 6869 6870 LoopVectorizationCostModel::VectorizationCostTy 6871 LoopVectorizationCostModel::expectedCost(ElementCount VF) { 6872 VectorizationCostTy Cost; 6873 6874 // For each block. 6875 for (BasicBlock *BB : TheLoop->blocks()) { 6876 VectorizationCostTy BlockCost; 6877 6878 // For each instruction in the old loop. 6879 for (Instruction &I : BB->instructionsWithoutDebug()) { 6880 // Skip ignored values. 6881 if (ValuesToIgnore.count(&I) || 6882 (VF.isVector() && VecValuesToIgnore.count(&I))) 6883 continue; 6884 6885 VectorizationCostTy C = getInstructionCost(&I, VF); 6886 6887 // Check if we should override the cost. 6888 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6889 C.first = InstructionCost(ForceTargetInstructionCost); 6890 6891 BlockCost.first += C.first; 6892 BlockCost.second |= C.second; 6893 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6894 << " for VF " << VF << " For instruction: " << I 6895 << '\n'); 6896 } 6897 6898 // If we are vectorizing a predicated block, it will have been 6899 // if-converted. This means that the block's instructions (aside from 6900 // stores and instructions that may divide by zero) will now be 6901 // unconditionally executed. For the scalar case, we may not always execute 6902 // the predicated block, if it is an if-else block. Thus, scale the block's 6903 // cost by the probability of executing it. 
blockNeedsPredication from 6904 // Legal is used so as to not include all blocks in tail folded loops. 6905 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6906 BlockCost.first /= getReciprocalPredBlockProb(); 6907 6908 Cost.first += BlockCost.first; 6909 Cost.second |= BlockCost.second; 6910 } 6911 6912 return Cost; 6913 } 6914 6915 /// Gets Address Access SCEV after verifying that the access pattern 6916 /// is loop invariant except the induction variable dependence. 6917 /// 6918 /// This SCEV can be sent to the Target in order to estimate the address 6919 /// calculation cost. 6920 static const SCEV *getAddressAccessSCEV( 6921 Value *Ptr, 6922 LoopVectorizationLegality *Legal, 6923 PredicatedScalarEvolution &PSE, 6924 const Loop *TheLoop) { 6925 6926 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6927 if (!Gep) 6928 return nullptr; 6929 6930 // We are looking for a gep with all loop invariant indices except for one 6931 // which should be an induction variable. 6932 auto SE = PSE.getSE(); 6933 unsigned NumOperands = Gep->getNumOperands(); 6934 for (unsigned i = 1; i < NumOperands; ++i) { 6935 Value *Opd = Gep->getOperand(i); 6936 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6937 !Legal->isInductionVariable(Opd)) 6938 return nullptr; 6939 } 6940 6941 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6942 return PSE.getSCEV(Ptr); 6943 } 6944 6945 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6946 return Legal->hasStride(I->getOperand(0)) || 6947 Legal->hasStride(I->getOperand(1)); 6948 } 6949 6950 InstructionCost 6951 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6952 ElementCount VF) { 6953 assert(VF.isVector() && 6954 "Scalarization cost of instruction implies vectorization."); 6955 if (VF.isScalable()) 6956 return InstructionCost::getInvalid(); 6957 6958 Type *ValTy = getLoadStoreType(I); 6959 auto SE = PSE.getSE(); 6960 6961 unsigned AS = getLoadStoreAddressSpace(I); 6962 Value *Ptr = getLoadStorePointerOperand(I); 6963 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6964 6965 // Figure out whether the access is strided and get the stride value 6966 // if it's known in compile time 6967 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6968 6969 // Get the cost of the scalar memory instruction and address computation. 6970 InstructionCost Cost = 6971 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6972 6973 // Don't pass *I here, since it is scalar but will actually be part of a 6974 // vectorized loop where the user of it is a vectorized instruction. 6975 const Align Alignment = getLoadStoreAlignment(I); 6976 Cost += VF.getKnownMinValue() * 6977 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6978 AS, TTI::TCK_RecipThroughput); 6979 6980 // Get the overhead of the extractelement and insertelement instructions 6981 // we might create due to scalarization. 6982 Cost += getScalarizationOverhead(I, VF); 6983 6984 // If we have a predicated load/store, it will need extra i1 extracts and 6985 // conditional branches, but may not be executed for each vector lane. Scale 6986 // the cost by the probability of executing the predicated block. 
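// A small worked example (illustrative only, assuming the default reciprocal
// block probability of 2, i.e. the predicated block is assumed to execute on
// roughly half of the iterations): for VF = 4 and a per-lane scalar cost of 2,
// the unscaled scalarization cost is 4 * 2 = 8, which the division below turns
// into 4; the per-lane i1 extract and branch costs are then added on top of
// that, unscaled.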
6987 if (isPredicatedInst(I)) { 6988 Cost /= getReciprocalPredBlockProb(); 6989 6990 // Add the cost of an i1 extract and a branch 6991 auto *Vec_i1Ty = 6992 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6993 Cost += TTI.getScalarizationOverhead( 6994 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 6995 /*Insert=*/false, /*Extract=*/true); 6996 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6997 6998 if (useEmulatedMaskMemRefHack(I)) 6999 // Artificially setting to a high enough value to practically disable 7000 // vectorization with such operations. 7001 Cost = 3000000; 7002 } 7003 7004 return Cost; 7005 } 7006 7007 InstructionCost 7008 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7009 ElementCount VF) { 7010 Type *ValTy = getLoadStoreType(I); 7011 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7012 Value *Ptr = getLoadStorePointerOperand(I); 7013 unsigned AS = getLoadStoreAddressSpace(I); 7014 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 7015 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7016 7017 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7018 "Stride should be 1 or -1 for consecutive memory access"); 7019 const Align Alignment = getLoadStoreAlignment(I); 7020 InstructionCost Cost = 0; 7021 if (Legal->isMaskRequired(I)) 7022 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7023 CostKind); 7024 else 7025 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7026 CostKind, I); 7027 7028 bool Reverse = ConsecutiveStride < 0; 7029 if (Reverse) 7030 Cost += 7031 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7032 return Cost; 7033 } 7034 7035 InstructionCost 7036 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7037 ElementCount VF) { 7038 assert(Legal->isUniformMemOp(*I)); 7039 7040 Type *ValTy = getLoadStoreType(I); 7041 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7042 const Align Alignment = getLoadStoreAlignment(I); 7043 unsigned AS = getLoadStoreAddressSpace(I); 7044 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7045 if (isa<LoadInst>(I)) { 7046 return TTI.getAddressComputationCost(ValTy) + 7047 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 7048 CostKind) + 7049 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7050 } 7051 StoreInst *SI = cast<StoreInst>(I); 7052 7053 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 7054 return TTI.getAddressComputationCost(ValTy) + 7055 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 7056 CostKind) + 7057 (isLoopInvariantStoreValue 7058 ? 
0 7059 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 7060 VF.getKnownMinValue() - 1)); 7061 } 7062 7063 InstructionCost 7064 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 7065 ElementCount VF) { 7066 Type *ValTy = getLoadStoreType(I); 7067 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7068 const Align Alignment = getLoadStoreAlignment(I); 7069 const Value *Ptr = getLoadStorePointerOperand(I); 7070 7071 return TTI.getAddressComputationCost(VectorTy) + 7072 TTI.getGatherScatterOpCost( 7073 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 7074 TargetTransformInfo::TCK_RecipThroughput, I); 7075 } 7076 7077 InstructionCost 7078 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 7079 ElementCount VF) { 7080 // TODO: Once we have support for interleaving with scalable vectors 7081 // we can calculate the cost properly here. 7082 if (VF.isScalable()) 7083 return InstructionCost::getInvalid(); 7084 7085 Type *ValTy = getLoadStoreType(I); 7086 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7087 unsigned AS = getLoadStoreAddressSpace(I); 7088 7089 auto Group = getInterleavedAccessGroup(I); 7090 assert(Group && "Fail to get an interleaved access group."); 7091 7092 unsigned InterleaveFactor = Group->getFactor(); 7093 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 7094 7095 // Holds the indices of existing members in an interleaved load group. 7096 // An interleaved store group doesn't need this as it doesn't allow gaps. 7097 SmallVector<unsigned, 4> Indices; 7098 if (isa<LoadInst>(I)) { 7099 for (unsigned i = 0; i < InterleaveFactor; i++) 7100 if (Group->getMember(i)) 7101 Indices.push_back(i); 7102 } 7103 7104 // Calculate the cost of the whole interleaved group. 7105 bool UseMaskForGaps = 7106 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 7107 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 7108 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 7109 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 7110 7111 if (Group->isReverse()) { 7112 // TODO: Add support for reversed masked interleaved access. 7113 assert(!Legal->isMaskRequired(I) && 7114 "Reverse masked interleaved access not supported."); 7115 Cost += 7116 Group->getNumMembers() * 7117 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7118 } 7119 return Cost; 7120 } 7121 7122 InstructionCost LoopVectorizationCostModel::getReductionPatternCost( 7123 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 7124 // Early exit for no inloop reductions 7125 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 7126 return InstructionCost::getInvalid(); 7127 auto *VectorTy = cast<VectorType>(Ty); 7128 7129 // We are looking for a pattern of, and finding the minimal acceptable cost: 7130 // reduce(mul(ext(A), ext(B))) or 7131 // reduce(mul(A, B)) or 7132 // reduce(ext(A)) or 7133 // reduce(A). 7134 // The basic idea is that we walk down the tree to do that, finding the root 7135 // reduction instruction in InLoopReductionImmediateChains. From there we find 7136 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 7137 // of the components. If the reduction cost is lower, then we return it for the 7138 // reduction instruction and 0 for the other instructions in the pattern. If 7139 // it is not, we return an invalid cost specifying that the original cost method 7140 // should be used.
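// Illustrative example (hypothetical IR, not taken from the sources): an
// in-loop add reduction of the form
//   %a.ext = sext i8 %a to i32
//   %b.ext = sext i8 %b to i32
//   %mul   = mul i32 %a.ext, %b.ext
//   %rdx   = add i32 %rdx.phi, %mul
// matches reduce(mul(ext(A), ext(B))). If the target reports a cheaper
// extended multiply-accumulate reduction, the full cost is returned for %rdx
// and the ext/mul instructions in the pattern are costed as 0.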
7141 Instruction *RetI = I; 7142 if ((RetI->getOpcode() == Instruction::SExt || 7143 RetI->getOpcode() == Instruction::ZExt)) { 7144 if (!RetI->hasOneUser()) 7145 return InstructionCost::getInvalid(); 7146 RetI = RetI->user_back(); 7147 } 7148 if (RetI->getOpcode() == Instruction::Mul && 7149 RetI->user_back()->getOpcode() == Instruction::Add) { 7150 if (!RetI->hasOneUser()) 7151 return InstructionCost::getInvalid(); 7152 RetI = RetI->user_back(); 7153 } 7154 7155 // Test if the found instruction is a reduction, and if not return an invalid 7156 // cost specifying the parent to use the original cost modelling. 7157 if (!InLoopReductionImmediateChains.count(RetI)) 7158 return InstructionCost::getInvalid(); 7159 7160 // Find the reduction this chain is a part of and calculate the basic cost of 7161 // the reduction on its own. 7162 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7163 Instruction *ReductionPhi = LastChain; 7164 while (!isa<PHINode>(ReductionPhi)) 7165 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7166 7167 RecurrenceDescriptor RdxDesc = 7168 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7169 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7170 RdxDesc.getOpcode(), VectorTy, false, CostKind); 7171 7172 // Get the operand that was not the reduction chain and match it to one of the 7173 // patterns, returning the better cost if it is found. 7174 Instruction *RedOp = RetI->getOperand(1) == LastChain 7175 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7176 : dyn_cast<Instruction>(RetI->getOperand(1)); 7177 7178 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7179 7180 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 7181 !TheLoop->isLoopInvariant(RedOp)) { 7182 bool IsUnsigned = isa<ZExtInst>(RedOp); 7183 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7184 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7185 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7186 CostKind); 7187 7188 InstructionCost ExtCost = 7189 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7190 TTI::CastContextHint::None, CostKind, RedOp); 7191 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7192 return I == RetI ? *RedCost.getValue() : 0; 7193 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 7194 Instruction *Mul = RedOp; 7195 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 7196 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 7197 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 7198 Op0->getOpcode() == Op1->getOpcode() && 7199 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7200 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7201 bool IsUnsigned = isa<ZExtInst>(Op0); 7202 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7203 // reduce(mul(ext, ext)) 7204 InstructionCost ExtCost = 7205 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7206 TTI::CastContextHint::None, CostKind, Op0); 7207 InstructionCost MulCost = 7208 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 7209 7210 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7211 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7212 CostKind); 7213 7214 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7215 return I == RetI ? 
*RedCost.getValue() : 0; 7216 } else { 7217 InstructionCost MulCost = 7218 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 7219 7220 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7221 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7222 CostKind); 7223 7224 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7225 return I == RetI ? *RedCost.getValue() : 0; 7226 } 7227 } 7228 7229 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 7230 } 7231 7232 InstructionCost 7233 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7234 ElementCount VF) { 7235 // Calculate scalar cost only. Vectorization cost should be ready at this 7236 // moment. 7237 if (VF.isScalar()) { 7238 Type *ValTy = getLoadStoreType(I); 7239 const Align Alignment = getLoadStoreAlignment(I); 7240 unsigned AS = getLoadStoreAddressSpace(I); 7241 7242 return TTI.getAddressComputationCost(ValTy) + 7243 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7244 TTI::TCK_RecipThroughput, I); 7245 } 7246 return getWideningCost(I, VF); 7247 } 7248 7249 LoopVectorizationCostModel::VectorizationCostTy 7250 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7251 ElementCount VF) { 7252 // If we know that this instruction will remain uniform, check the cost of 7253 // the scalar version. 7254 if (isUniformAfterVectorization(I, VF)) 7255 VF = ElementCount::getFixed(1); 7256 7257 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7258 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7259 7260 // Forced scalars do not have any scalarization overhead. 7261 auto ForcedScalar = ForcedScalars.find(VF); 7262 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7263 auto InstSet = ForcedScalar->second; 7264 if (InstSet.count(I)) 7265 return VectorizationCostTy( 7266 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7267 VF.getKnownMinValue()), 7268 false); 7269 } 7270 7271 Type *VectorTy; 7272 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7273 7274 bool TypeNotScalarized = 7275 VF.isVector() && VectorTy->isVectorTy() && 7276 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7277 return VectorizationCostTy(C, TypeNotScalarized); 7278 } 7279 7280 InstructionCost 7281 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7282 ElementCount VF) const { 7283 7284 if (VF.isScalable()) 7285 return InstructionCost::getInvalid(); 7286 7287 if (VF.isScalar()) 7288 return 0; 7289 7290 InstructionCost Cost = 0; 7291 Type *RetTy = ToVectorTy(I->getType(), VF); 7292 if (!RetTy->isVoidTy() && 7293 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7294 Cost += TTI.getScalarizationOverhead( 7295 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7296 true, false); 7297 7298 // Some targets keep addresses scalar. 7299 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7300 return Cost; 7301 7302 // Some targets support efficient element stores. 7303 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7304 return Cost; 7305 7306 // Collect operands to consider. 7307 CallInst *CI = dyn_cast<CallInst>(I); 7308 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7309 7310 // Skip operands that do not require extraction/scalarization and do not incur 7311 // any overhead. 
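// For illustration (hypothetical example): when a call such as
//   %r = call double @llvm.sin.f64(double %x)
// is scalarized with VF = 4, the four scalar results must be inserted back
// into a vector (accounted for above), and each scalar call must extract its
// %x lane from the widened operand, which is what the operand scalarization
// overhead computed below accounts for.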
7312 SmallVector<Type *> Tys; 7313 for (auto *V : filterExtractingOperands(Ops, VF)) 7314 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7315 return Cost + TTI.getOperandsScalarizationOverhead( 7316 filterExtractingOperands(Ops, VF), Tys); 7317 } 7318 7319 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7320 if (VF.isScalar()) 7321 return; 7322 NumPredStores = 0; 7323 for (BasicBlock *BB : TheLoop->blocks()) { 7324 // For each instruction in the old loop. 7325 for (Instruction &I : *BB) { 7326 Value *Ptr = getLoadStorePointerOperand(&I); 7327 if (!Ptr) 7328 continue; 7329 7330 // TODO: We should generate better code and update the cost model for 7331 // predicated uniform stores. Today they are treated as any other 7332 // predicated store (see added test cases in 7333 // invariant-store-vectorization.ll). 7334 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7335 NumPredStores++; 7336 7337 if (Legal->isUniformMemOp(I)) { 7338 // TODO: Avoid replicating loads and stores instead of 7339 // relying on instcombine to remove them. 7340 // Load: Scalar load + broadcast 7341 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7342 InstructionCost Cost = getUniformMemOpCost(&I, VF); 7343 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7344 continue; 7345 } 7346 7347 // We assume that widening is the best solution when possible. 7348 if (memoryInstructionCanBeWidened(&I, VF)) { 7349 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7350 int ConsecutiveStride = 7351 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7352 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7353 "Expected consecutive stride."); 7354 InstWidening Decision = 7355 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7356 setWideningDecision(&I, VF, Decision, Cost); 7357 continue; 7358 } 7359 7360 // Choose between Interleaving, Gather/Scatter or Scalarization. 7361 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7362 unsigned NumAccesses = 1; 7363 if (isAccessInterleaved(&I)) { 7364 auto Group = getInterleavedAccessGroup(&I); 7365 assert(Group && "Fail to get an interleaved access group."); 7366 7367 // Make one decision for the whole group. 7368 if (getWideningDecision(&I, VF) != CM_Unknown) 7369 continue; 7370 7371 NumAccesses = Group->getNumMembers(); 7372 if (interleavedAccessCanBeWidened(&I, VF)) 7373 InterleaveCost = getInterleaveGroupCost(&I, VF); 7374 } 7375 7376 InstructionCost GatherScatterCost = 7377 isLegalGatherOrScatter(&I) 7378 ? getGatherScatterCost(&I, VF) * NumAccesses 7379 : InstructionCost::getInvalid(); 7380 7381 InstructionCost ScalarizationCost = 7382 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7383 7384 // Choose better solution for the current VF, 7385 // write down this decision and use it during vectorization. 7386 InstructionCost Cost; 7387 InstWidening Decision; 7388 if (InterleaveCost <= GatherScatterCost && 7389 InterleaveCost < ScalarizationCost) { 7390 Decision = CM_Interleave; 7391 Cost = InterleaveCost; 7392 } else if (GatherScatterCost < ScalarizationCost) { 7393 Decision = CM_GatherScatter; 7394 Cost = GatherScatterCost; 7395 } else { 7396 assert(!VF.isScalable() && 7397 "We cannot yet scalarise for scalable vectors"); 7398 Decision = CM_Scalarize; 7399 Cost = ScalarizationCost; 7400 } 7401 // If the instructions belongs to an interleave group, the whole group 7402 // receives the same decision. 
The whole group receives the cost, but 7403 // the cost will actually be assigned to one instruction. 7404 if (auto Group = getInterleavedAccessGroup(&I)) 7405 setWideningDecision(Group, VF, Decision, Cost); 7406 else 7407 setWideningDecision(&I, VF, Decision, Cost); 7408 } 7409 } 7410 7411 // Make sure that any load of address and any other address computation 7412 // remains scalar unless there is gather/scatter support. This avoids 7413 // inevitable extracts into address registers, and also has the benefit of 7414 // activating LSR more, since that pass can't optimize vectorized 7415 // addresses. 7416 if (TTI.prefersVectorizedAddressing()) 7417 return; 7418 7419 // Start with all scalar pointer uses. 7420 SmallPtrSet<Instruction *, 8> AddrDefs; 7421 for (BasicBlock *BB : TheLoop->blocks()) 7422 for (Instruction &I : *BB) { 7423 Instruction *PtrDef = 7424 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7425 if (PtrDef && TheLoop->contains(PtrDef) && 7426 getWideningDecision(&I, VF) != CM_GatherScatter) 7427 AddrDefs.insert(PtrDef); 7428 } 7429 7430 // Add all instructions used to generate the addresses. 7431 SmallVector<Instruction *, 4> Worklist; 7432 append_range(Worklist, AddrDefs); 7433 while (!Worklist.empty()) { 7434 Instruction *I = Worklist.pop_back_val(); 7435 for (auto &Op : I->operands()) 7436 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7437 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7438 AddrDefs.insert(InstOp).second) 7439 Worklist.push_back(InstOp); 7440 } 7441 7442 for (auto *I : AddrDefs) { 7443 if (isa<LoadInst>(I)) { 7444 // Setting the desired widening decision should ideally be handled in 7445 // by cost functions, but since this involves the task of finding out 7446 // if the loaded register is involved in an address computation, it is 7447 // instead changed here when we know this is the case. 7448 InstWidening Decision = getWideningDecision(I, VF); 7449 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7450 // Scalarize a widened load of address. 7451 setWideningDecision( 7452 I, VF, CM_Scalarize, 7453 (VF.getKnownMinValue() * 7454 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7455 else if (auto Group = getInterleavedAccessGroup(I)) { 7456 // Scalarize an interleave group of address loads. 7457 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7458 if (Instruction *Member = Group->getMember(I)) 7459 setWideningDecision( 7460 Member, VF, CM_Scalarize, 7461 (VF.getKnownMinValue() * 7462 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7463 } 7464 } 7465 } else 7466 // Make sure I gets scalarized and a cost estimate without 7467 // scalarization overhead. 
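// Note (cross-reference): instructions placed in ForcedScalars are costed in
// getInstructionCost above as VF scalar copies of the instruction,
// deliberately without any insert/extract scalarization overhead added.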
7468 ForcedScalars[VF].insert(I); 7469 } 7470 } 7471 7472 InstructionCost 7473 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7474 Type *&VectorTy) { 7475 Type *RetTy = I->getType(); 7476 if (canTruncateToMinimalBitwidth(I, VF)) 7477 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7478 auto SE = PSE.getSE(); 7479 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7480 7481 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7482 ElementCount VF) -> bool { 7483 if (VF.isScalar()) 7484 return true; 7485 7486 auto Scalarized = InstsToScalarize.find(VF); 7487 assert(Scalarized != InstsToScalarize.end() && 7488 "VF not yet analyzed for scalarization profitability"); 7489 return !Scalarized->second.count(I) && 7490 llvm::all_of(I->users(), [&](User *U) { 7491 auto *UI = cast<Instruction>(U); 7492 return !Scalarized->second.count(UI); 7493 }); 7494 }; 7495 (void) hasSingleCopyAfterVectorization; 7496 7497 if (isScalarAfterVectorization(I, VF)) { 7498 // With the exception of GEPs and PHIs, after scalarization there should 7499 // only be one copy of the instruction generated in the loop. This is 7500 // because the VF is either 1, or any instructions that need scalarizing 7501 // have already been dealt with by the the time we get here. As a result, 7502 // it means we don't have to multiply the instruction cost by VF. 7503 assert(I->getOpcode() == Instruction::GetElementPtr || 7504 I->getOpcode() == Instruction::PHI || 7505 (I->getOpcode() == Instruction::BitCast && 7506 I->getType()->isPointerTy()) || 7507 hasSingleCopyAfterVectorization(I, VF)); 7508 VectorTy = RetTy; 7509 } else 7510 VectorTy = ToVectorTy(RetTy, VF); 7511 7512 // TODO: We need to estimate the cost of intrinsic calls. 7513 switch (I->getOpcode()) { 7514 case Instruction::GetElementPtr: 7515 // We mark this instruction as zero-cost because the cost of GEPs in 7516 // vectorized code depends on whether the corresponding memory instruction 7517 // is scalarized or not. Therefore, we handle GEPs with the memory 7518 // instruction cost. 7519 return 0; 7520 case Instruction::Br: { 7521 // In cases of scalarized and predicated instructions, there will be VF 7522 // predicated blocks in the vectorized loop. Each branch around these 7523 // blocks requires also an extract of its vector compare i1 element. 7524 bool ScalarPredicatedBB = false; 7525 BranchInst *BI = cast<BranchInst>(I); 7526 if (VF.isVector() && BI->isConditional() && 7527 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7528 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7529 ScalarPredicatedBB = true; 7530 7531 if (ScalarPredicatedBB) { 7532 // Return cost for branches around scalarized and predicated blocks. 7533 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7534 auto *Vec_i1Ty = 7535 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7536 return (TTI.getScalarizationOverhead( 7537 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7538 false, true) + 7539 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7540 VF.getKnownMinValue())); 7541 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7542 // The back-edge branch will remain, as will all scalar branches. 7543 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7544 else 7545 // This branch will be eliminated by if-conversion. 
7546 return 0; 7547 // Note: We currently assume zero cost for an unconditional branch inside 7548 // a predicated block since it will become a fall-through, although we 7549 // may decide in the future to call TTI for all branches. 7550 } 7551 case Instruction::PHI: { 7552 auto *Phi = cast<PHINode>(I); 7553 7554 // First-order recurrences are replaced by vector shuffles inside the loop. 7555 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7556 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7557 return TTI.getShuffleCost( 7558 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7559 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7560 7561 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7562 // converted into select instructions. We require N - 1 selects per phi 7563 // node, where N is the number of incoming values. 7564 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7565 return (Phi->getNumIncomingValues() - 1) * 7566 TTI.getCmpSelInstrCost( 7567 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7568 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7569 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7570 7571 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7572 } 7573 case Instruction::UDiv: 7574 case Instruction::SDiv: 7575 case Instruction::URem: 7576 case Instruction::SRem: 7577 // If we have a predicated instruction, it may not be executed for each 7578 // vector lane. Get the scalarization cost and scale this amount by the 7579 // probability of executing the predicated block. If the instruction is not 7580 // predicated, we fall through to the next case. 7581 if (VF.isVector() && isScalarWithPredication(I)) { 7582 InstructionCost Cost = 0; 7583 7584 // These instructions have a non-void type, so account for the phi nodes 7585 // that we will create. This cost is likely to be zero. The phi node 7586 // cost, if any, should be scaled by the block probability because it 7587 // models a copy at the end of each predicated block. 7588 Cost += VF.getKnownMinValue() * 7589 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7590 7591 // The cost of the non-predicated instruction. 7592 Cost += VF.getKnownMinValue() * 7593 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7594 7595 // The cost of insertelement and extractelement instructions needed for 7596 // scalarization. 7597 Cost += getScalarizationOverhead(I, VF); 7598 7599 // Scale the cost by the probability of executing the predicated blocks. 7600 // This assumes the predicated block for each vector lane is equally 7601 // likely. 7602 return Cost / getReciprocalPredBlockProb(); 7603 } 7604 LLVM_FALLTHROUGH; 7605 case Instruction::Add: 7606 case Instruction::FAdd: 7607 case Instruction::Sub: 7608 case Instruction::FSub: 7609 case Instruction::Mul: 7610 case Instruction::FMul: 7611 case Instruction::FDiv: 7612 case Instruction::FRem: 7613 case Instruction::Shl: 7614 case Instruction::LShr: 7615 case Instruction::AShr: 7616 case Instruction::And: 7617 case Instruction::Or: 7618 case Instruction::Xor: { 7619 // Since we will replace the stride by 1 the multiplication should go away. 
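// For example (illustrative): if the loop has been versioned on a symbolic
// stride being 1, a multiply such as
//   %offset = mul i64 %i, %stride
// folds away once %stride is replaced by the constant 1, so such multiplies
// are given a cost of 0 below.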
7620 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7621 return 0; 7622 7623 // Detect reduction patterns 7624 InstructionCost RedCost; 7625 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7626 .isValid()) 7627 return RedCost; 7628 7629 // Certain instructions can be cheaper to vectorize if they have a constant 7630 // second vector operand. One example of this are shifts on x86. 7631 Value *Op2 = I->getOperand(1); 7632 TargetTransformInfo::OperandValueProperties Op2VP; 7633 TargetTransformInfo::OperandValueKind Op2VK = 7634 TTI.getOperandInfo(Op2, Op2VP); 7635 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7636 Op2VK = TargetTransformInfo::OK_UniformValue; 7637 7638 SmallVector<const Value *, 4> Operands(I->operand_values()); 7639 return TTI.getArithmeticInstrCost( 7640 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7641 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7642 } 7643 case Instruction::FNeg: { 7644 return TTI.getArithmeticInstrCost( 7645 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7646 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7647 TargetTransformInfo::OP_None, I->getOperand(0), I); 7648 } 7649 case Instruction::Select: { 7650 SelectInst *SI = cast<SelectInst>(I); 7651 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7652 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7653 7654 const Value *Op0, *Op1; 7655 using namespace llvm::PatternMatch; 7656 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7657 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7658 // select x, y, false --> x & y 7659 // select x, true, y --> x | y 7660 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7661 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7662 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7663 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7664 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7665 Op1->getType()->getScalarSizeInBits() == 1); 7666 7667 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7668 return TTI.getArithmeticInstrCost( 7669 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7670 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7671 } 7672 7673 Type *CondTy = SI->getCondition()->getType(); 7674 if (!ScalarCond) 7675 CondTy = VectorType::get(CondTy, VF); 7676 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7677 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7678 } 7679 case Instruction::ICmp: 7680 case Instruction::FCmp: { 7681 Type *ValTy = I->getOperand(0)->getType(); 7682 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7683 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7684 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7685 VectorTy = ToVectorTy(ValTy, VF); 7686 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7687 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7688 } 7689 case Instruction::Store: 7690 case Instruction::Load: { 7691 ElementCount Width = VF; 7692 if (Width.isVector()) { 7693 InstWidening Decision = getWideningDecision(I, Width); 7694 assert(Decision != CM_Unknown && 7695 "CM decision should be taken at this point"); 7696 if (Decision == CM_Scalarize) 7697 Width = ElementCount::getFixed(1); 7698 } 7699 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7700 return getMemoryInstructionCost(I, VF); 7701 } 7702 case Instruction::BitCast: 7703 if (I->getType()->isPointerTy()) 7704 return 0; 7705 LLVM_FALLTHROUGH; 7706 case Instruction::ZExt: 7707 case Instruction::SExt: 7708 case Instruction::FPToUI: 7709 case Instruction::FPToSI: 7710 case Instruction::FPExt: 7711 case Instruction::PtrToInt: 7712 case Instruction::IntToPtr: 7713 case Instruction::SIToFP: 7714 case Instruction::UIToFP: 7715 case Instruction::Trunc: 7716 case Instruction::FPTrunc: { 7717 // Computes the CastContextHint from a Load/Store instruction. 7718 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7719 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7720 "Expected a load or a store!"); 7721 7722 if (VF.isScalar() || !TheLoop->contains(I)) 7723 return TTI::CastContextHint::Normal; 7724 7725 switch (getWideningDecision(I, VF)) { 7726 case LoopVectorizationCostModel::CM_GatherScatter: 7727 return TTI::CastContextHint::GatherScatter; 7728 case LoopVectorizationCostModel::CM_Interleave: 7729 return TTI::CastContextHint::Interleave; 7730 case LoopVectorizationCostModel::CM_Scalarize: 7731 case LoopVectorizationCostModel::CM_Widen: 7732 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7733 : TTI::CastContextHint::Normal; 7734 case LoopVectorizationCostModel::CM_Widen_Reverse: 7735 return TTI::CastContextHint::Reversed; 7736 case LoopVectorizationCostModel::CM_Unknown: 7737 llvm_unreachable("Instr did not go through cost modelling?"); 7738 } 7739 7740 llvm_unreachable("Unhandled case!"); 7741 }; 7742 7743 unsigned Opcode = I->getOpcode(); 7744 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7745 // For Trunc, the context is the only user, which must be a StoreInst. 7746 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7747 if (I->hasOneUse()) 7748 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7749 CCH = ComputeCCH(Store); 7750 } 7751 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7752 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7753 Opcode == Instruction::FPExt) { 7754 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7755 CCH = ComputeCCH(Load); 7756 } 7757 7758 // We optimize the truncation of induction variables having constant 7759 // integer steps. The cost of these truncations is the same as the scalar 7760 // operation. 7761 if (isOptimizableIVTruncate(I, VF)) { 7762 auto *Trunc = cast<TruncInst>(I); 7763 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7764 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7765 } 7766 7767 // Detect reduction patterns 7768 InstructionCost RedCost; 7769 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7770 .isValid()) 7771 return RedCost; 7772 7773 Type *SrcScalarTy = I->getOperand(0)->getType(); 7774 Type *SrcVecTy = 7775 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7776 if (canTruncateToMinimalBitwidth(I, VF)) { 7777 // This cast is going to be shrunk. This may remove the cast or it might 7778 // turn it into slightly different cast. For example, if MinBW == 16, 7779 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7780 // 7781 // Calculate the modified src and dest types. 7782 Type *MinVecTy = VectorTy; 7783 if (Opcode == Instruction::Trunc) { 7784 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7785 VectorTy = 7786 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7787 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7788 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7789 VectorTy = 7790 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7791 } 7792 } 7793 7794 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7795 } 7796 case Instruction::Call: { 7797 bool NeedToScalarize; 7798 CallInst *CI = cast<CallInst>(I); 7799 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7800 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7801 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7802 return std::min(CallCost, IntrinsicCost); 7803 } 7804 return CallCost; 7805 } 7806 case Instruction::ExtractValue: 7807 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7808 default: 7809 // This opcode is unknown. Assume that it is the same as 'mul'. 7810 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7811 } // end of switch. 
7812 } 7813 7814 char LoopVectorize::ID = 0; 7815 7816 static const char lv_name[] = "Loop Vectorization"; 7817 7818 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7819 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7820 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7821 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7822 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7823 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7824 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7825 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7826 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7827 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7828 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7829 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7830 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7831 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7832 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7833 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7834 7835 namespace llvm { 7836 7837 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7838 7839 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7840 bool VectorizeOnlyWhenForced) { 7841 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7842 } 7843 7844 } // end namespace llvm 7845 7846 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7847 // Check if the pointer operand of a load or store instruction is 7848 // consecutive. 7849 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7850 return Legal->isConsecutivePtr(Ptr); 7851 return false; 7852 } 7853 7854 void LoopVectorizationCostModel::collectValuesToIgnore() { 7855 // Ignore ephemeral values. 7856 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7857 7858 // Ignore type-promoting instructions we identified during reduction 7859 // detection. 7860 for (auto &Reduction : Legal->getReductionVars()) { 7861 RecurrenceDescriptor &RedDes = Reduction.second; 7862 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7863 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7864 } 7865 // Ignore type-casting instructions we identified during induction 7866 // detection. 7867 for (auto &Induction : Legal->getInductionVars()) { 7868 InductionDescriptor &IndDes = Induction.second; 7869 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7870 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7871 } 7872 } 7873 7874 void LoopVectorizationCostModel::collectInLoopReductions() { 7875 for (auto &Reduction : Legal->getReductionVars()) { 7876 PHINode *Phi = Reduction.first; 7877 RecurrenceDescriptor &RdxDesc = Reduction.second; 7878 7879 // We don't collect reductions that are type promoted (yet). 7880 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7881 continue; 7882 7883 // If the target would prefer this reduction to happen "in-loop", then we 7884 // want to record it as such. 7885 unsigned Opcode = RdxDesc.getOpcode(); 7886 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7887 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7888 TargetTransformInfo::ReductionFlags())) 7889 continue; 7890 7891 // Check that we can correctly put the reductions into the loop, by 7892 // finding the chain of operations that leads from the phi to the loop 7893 // exit value. 
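// Illustrative example (hypothetical IR): for a simple integer add reduction
//   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %loop ]
//   ...
//   %sum.next = add i32 %sum, %val
// the returned chain is { %sum.next }, and the code below records
// %sum.next -> %sum in InLoopReductionImmediateChains so the cost model can
// walk from any chain element back to the reduction phi.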
7894 SmallVector<Instruction *, 4> ReductionOperations = 7895 RdxDesc.getReductionOpChain(Phi, TheLoop); 7896 bool InLoop = !ReductionOperations.empty(); 7897 if (InLoop) { 7898 InLoopReductionChains[Phi] = ReductionOperations; 7899 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7900 Instruction *LastChain = Phi; 7901 for (auto *I : ReductionOperations) { 7902 InLoopReductionImmediateChains[I] = LastChain; 7903 LastChain = I; 7904 } 7905 } 7906 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7907 << " reduction for phi: " << *Phi << "\n"); 7908 } 7909 } 7910 7911 // TODO: we could return a pair of values that specify the max VF and 7912 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7913 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7914 // doesn't have a cost model that can choose which plan to execute if 7915 // more than one is generated. 7916 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7917 LoopVectorizationCostModel &CM) { 7918 unsigned WidestType; 7919 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7920 return WidestVectorRegBits / WidestType; 7921 } 7922 7923 VectorizationFactor 7924 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7925 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7926 ElementCount VF = UserVF; 7927 // Outer loop handling: They may require CFG and instruction level 7928 // transformations before even evaluating whether vectorization is profitable. 7929 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7930 // the vectorization pipeline. 7931 if (!OrigLoop->isInnermost()) { 7932 // If the user doesn't provide a vectorization factor, determine a 7933 // reasonable one. 7934 if (UserVF.isZero()) { 7935 VF = ElementCount::getFixed(determineVPlanVF( 7936 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7937 .getFixedSize(), 7938 CM)); 7939 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7940 7941 // Make sure we have a VF > 1 for stress testing. 7942 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7943 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7944 << "overriding computed VF.\n"); 7945 VF = ElementCount::getFixed(4); 7946 } 7947 } 7948 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7949 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7950 "VF needs to be a power of two"); 7951 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7952 << "VF " << VF << " to build VPlans.\n"); 7953 buildVPlans(VF, VF); 7954 7955 // For VPlan build stress testing, we bail out after VPlan construction. 7956 if (VPlanBuildStressTest) 7957 return VectorizationFactor::Disabled(); 7958 7959 return {VF, 0 /*Cost*/}; 7960 } 7961 7962 LLVM_DEBUG( 7963 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7964 "VPlan-native path.\n"); 7965 return VectorizationFactor::Disabled(); 7966 } 7967 7968 Optional<VectorizationFactor> 7969 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7970 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7971 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7972 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7973 return None; 7974 7975 // Invalidate interleave groups if all blocks of loop will be predicated. 
7976 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 7977 !useMaskedInterleavedAccesses(*TTI)) { 7978 LLVM_DEBUG( 7979 dbgs() 7980 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7981 "which requires masked-interleaved support.\n"); 7982 if (CM.InterleaveInfo.invalidateGroups()) 7983 // Invalidating interleave groups also requires invalidating all decisions 7984 // based on them, which includes widening decisions and uniform and scalar 7985 // values. 7986 CM.invalidateCostModelingDecisions(); 7987 } 7988 7989 ElementCount MaxUserVF = 7990 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7991 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7992 if (!UserVF.isZero() && UserVFIsLegal) { 7993 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max") 7994 << " VF " << UserVF << ".\n"); 7995 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7996 "VF needs to be a power of two"); 7997 // Collect the instructions (and their associated costs) that will be more 7998 // profitable to scalarize. 7999 CM.selectUserVectorizationFactor(UserVF); 8000 CM.collectInLoopReductions(); 8001 buildVPlansWithVPRecipes(UserVF, UserVF); 8002 LLVM_DEBUG(printPlans(dbgs())); 8003 return {{UserVF, 0}}; 8004 } 8005 8006 // Populate the set of Vectorization Factor Candidates. 8007 ElementCountSet VFCandidates; 8008 for (auto VF = ElementCount::getFixed(1); 8009 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 8010 VFCandidates.insert(VF); 8011 for (auto VF = ElementCount::getScalable(1); 8012 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 8013 VFCandidates.insert(VF); 8014 8015 for (const auto VF : VFCandidates) { 8016 // Collect Uniform and Scalar instructions after vectorization with VF. 8017 CM.collectUniformsAndScalars(VF); 8018 8019 // Collect the instructions (and their associated costs) that will be more 8020 // profitable to scalarize. 8021 if (VF.isVector()) 8022 CM.collectInstsToScalarize(VF); 8023 } 8024 8025 CM.collectInLoopReductions(); 8026 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 8027 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 8028 8029 LLVM_DEBUG(printPlans(dbgs())); 8030 if (!MaxFactors.hasVector()) 8031 return VectorizationFactor::Disabled(); 8032 8033 // Select the optimal vectorization factor. 8034 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 8035 8036 // Check if it is profitable to vectorize with runtime checks. 
8037 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 8038 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 8039 bool PragmaThresholdReached = 8040 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 8041 bool ThresholdReached = 8042 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 8043 if ((ThresholdReached && !Hints.allowReordering()) || 8044 PragmaThresholdReached) { 8045 ORE->emit([&]() { 8046 return OptimizationRemarkAnalysisAliasing( 8047 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 8048 OrigLoop->getHeader()) 8049 << "loop not vectorized: cannot prove it is safe to reorder " 8050 "memory operations"; 8051 }); 8052 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 8053 Hints.emitRemarkWithHints(); 8054 return VectorizationFactor::Disabled(); 8055 } 8056 } 8057 return SelectedVF; 8058 } 8059 8060 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 8061 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 8062 << '\n'); 8063 BestVF = VF; 8064 BestUF = UF; 8065 8066 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 8067 return !Plan->hasVF(VF); 8068 }); 8069 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 8070 } 8071 8072 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 8073 DominatorTree *DT) { 8074 // Perform the actual loop transformation. 8075 8076 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 8077 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 8078 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 8079 8080 VPTransformState State{ 8081 *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; 8082 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8083 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8084 State.CanonicalIV = ILV.Induction; 8085 8086 ILV.printDebugTracesAtStart(); 8087 8088 //===------------------------------------------------===// 8089 // 8090 // Notice: any optimization or new instruction that go 8091 // into the code below should also be implemented in 8092 // the cost-model. 8093 // 8094 //===------------------------------------------------===// 8095 8096 // 2. Copy and widen instructions from the old loop into the new loop. 8097 VPlans.front()->execute(&State); 8098 8099 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8100 // predication, updating analyses. 
8101 ILV.fixVectorizedLoop(State); 8102 8103 ILV.printDebugTracesAtEnd(); 8104 } 8105 8106 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 8107 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 8108 for (const auto &Plan : VPlans) 8109 if (PrintVPlansInDotFormat) 8110 Plan->printDOT(O); 8111 else 8112 Plan->print(O); 8113 } 8114 #endif 8115 8116 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 8117 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 8118 8119 // We create new control-flow for the vectorized loop, so the original exit 8120 // conditions will be dead after vectorization if it's only used by the 8121 // terminator 8122 SmallVector<BasicBlock*> ExitingBlocks; 8123 OrigLoop->getExitingBlocks(ExitingBlocks); 8124 for (auto *BB : ExitingBlocks) { 8125 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 8126 if (!Cmp || !Cmp->hasOneUse()) 8127 continue; 8128 8129 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8130 if (!DeadInstructions.insert(Cmp).second) 8131 continue; 8132 8133 // The operands of the icmp is often a dead trunc, used by IndUpdate. 8134 // TODO: can recurse through operands in general 8135 for (Value *Op : Cmp->operands()) { 8136 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8137 DeadInstructions.insert(cast<Instruction>(Op)); 8138 } 8139 } 8140 8141 // We create new "steps" for induction variable updates to which the original 8142 // induction variables map. An original update instruction will be dead if 8143 // all its users except the induction variable are dead. 8144 auto *Latch = OrigLoop->getLoopLatch(); 8145 for (auto &Induction : Legal->getInductionVars()) { 8146 PHINode *Ind = Induction.first; 8147 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8148 8149 // If the tail is to be folded by masking, the primary induction variable, 8150 // if exists, isn't dead: it will be used for masking. Don't kill it. 8151 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8152 continue; 8153 8154 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8155 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8156 })) 8157 DeadInstructions.insert(IndUpdate); 8158 8159 // We record as "Dead" also the type-casting instructions we had identified 8160 // during induction analysis. We don't need any handling for them in the 8161 // vectorized loop because we have proven that, under a proper runtime 8162 // test guarding the vectorized loop, the value of the phi, and the casted 8163 // value of the phi, are the same. The last instruction in this casting chain 8164 // will get its scalar/vector/widened def from the scalar/vector/widened def 8165 // of the respective phi node. Any other casts in the induction def-use chain 8166 // have no other uses outside the phi update chain, and will be ignored. 8167 InductionDescriptor &IndDes = Induction.second; 8168 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8169 DeadInstructions.insert(Casts.begin(), Casts.end()); 8170 } 8171 } 8172 8173 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 8174 8175 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8176 8177 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 8178 Instruction::BinaryOps BinOp) { 8179 // When unrolling and the VF is 1, we only need to add a simple scalar. 
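// A small worked example: for an integer induction with Val = %iv,
// StartIdx = 2 and Step = %step, the code below emits the equivalent of
//   %iv + (2 * %step)
// as a scalar mul/add pair; the floating-point path emits an fmul and the
// requested BinOp instead.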
8180 Type *Ty = Val->getType(); 8181 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8182 8183 if (Ty->isFloatingPointTy()) { 8184 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 8185 8186 // Floating-point operations inherit FMF via the builder's flags. 8187 Value *MulOp = Builder.CreateFMul(C, Step); 8188 return Builder.CreateBinOp(BinOp, Val, MulOp); 8189 } 8190 Constant *C = ConstantInt::get(Ty, StartIdx); 8191 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 8192 } 8193 8194 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8195 SmallVector<Metadata *, 4> MDs; 8196 // Reserve first location for self reference to the LoopID metadata node. 8197 MDs.push_back(nullptr); 8198 bool IsUnrollMetadata = false; 8199 MDNode *LoopID = L->getLoopID(); 8200 if (LoopID) { 8201 // First find existing loop unrolling disable metadata. 8202 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8203 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8204 if (MD) { 8205 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8206 IsUnrollMetadata = 8207 S && S->getString().startswith("llvm.loop.unroll.disable"); 8208 } 8209 MDs.push_back(LoopID->getOperand(i)); 8210 } 8211 } 8212 8213 if (!IsUnrollMetadata) { 8214 // Add runtime unroll disable metadata. 8215 LLVMContext &Context = L->getHeader()->getContext(); 8216 SmallVector<Metadata *, 1> DisableOperands; 8217 DisableOperands.push_back( 8218 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8219 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8220 MDs.push_back(DisableNode); 8221 MDNode *NewLoopID = MDNode::get(Context, MDs); 8222 // Set operand 0 to refer to the loop id itself. 8223 NewLoopID->replaceOperandWith(0, NewLoopID); 8224 L->setLoopID(NewLoopID); 8225 } 8226 } 8227 8228 //===--------------------------------------------------------------------===// 8229 // EpilogueVectorizerMainLoop 8230 //===--------------------------------------------------------------------===// 8231 8232 /// This function is partially responsible for generating the control flow 8233 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8234 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8235 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8236 Loop *Lp = createVectorLoopSkeleton(""); 8237 8238 // Generate the code to check the minimum iteration count of the vector 8239 // epilogue (see below). 8240 EPI.EpilogueIterationCountCheck = 8241 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8242 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8243 8244 // Generate the code to check any assumptions that we've made for SCEV 8245 // expressions. 8246 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8247 8248 // Generate the code that checks at runtime if arrays overlap. We put the 8249 // checks into a separate block to make the more common case of few elements 8250 // faster. 8251 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8252 8253 // Generate the iteration count check for the main loop, *after* the check 8254 // for the epilogue loop, so that the path-length is shorter for the case 8255 // that goes directly through the vector epilogue. The longer-path length for 8256 // the main loop is compensated for, by the gain from vectorizing the larger 8257 // trip count. Note: the branch will get updated later on when we vectorize 8258 // the epilogue. 
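// Rough sketch of the resulting check ordering (simplified; the diagram
// linked above is authoritative):
//   iter.check:                   trip count too small even for the epilogue
//                                 VF? Branch straight to the scalar loop.
//   vector.main.loop.iter.check:  trip count too small for the main VF * UF?
//                                 Skip the main vector loop and continue
//                                 towards the vector epilogue.
//   otherwise:                    run the main vector loop, then decide in
//                                 vec.epilog.iter.check whether the remainder
//                                 is worth running the vector epilogue for.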
8259 EPI.MainLoopIterationCountCheck = 8260 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8261 8262 // Generate the induction variable. 8263 OldInduction = Legal->getPrimaryInduction(); 8264 Type *IdxTy = Legal->getWidestInductionType(); 8265 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8266 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8267 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8268 EPI.VectorTripCount = CountRoundDown; 8269 Induction = 8270 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8271 getDebugLocFromInstOrOperands(OldInduction)); 8272 8273 // Skip induction resume value creation here because they will be created in 8274 // the second pass. If we created them here, they wouldn't be used anyway, 8275 // because the vplan in the second pass still contains the inductions from the 8276 // original loop. 8277 8278 return completeLoopSkeleton(Lp, OrigLoopID); 8279 } 8280 8281 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8282 LLVM_DEBUG({ 8283 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8284 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8285 << ", Main Loop UF:" << EPI.MainLoopUF 8286 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8287 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8288 }); 8289 } 8290 8291 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8292 DEBUG_WITH_TYPE(VerboseDebug, { 8293 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8294 }); 8295 } 8296 8297 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8298 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8299 assert(L && "Expected valid Loop."); 8300 assert(Bypass && "Expected valid bypass basic block."); 8301 unsigned VFactor = 8302 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 8303 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8304 Value *Count = getOrCreateTripCount(L); 8305 // Reuse existing vector loop preheader for TC checks. 8306 // Note that new preheader block is generated for vector loop. 8307 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8308 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8309 8310 // Generate code to check if the loop's trip count is less than VF * UF of the 8311 // main vector loop. 8312 auto P = 8313 Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8314 8315 Value *CheckMinIters = Builder.CreateICmp( 8316 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 8317 "min.iters.check"); 8318 8319 if (!ForEpilogue) 8320 TCCheckBlock->setName("vector.main.loop.iter.check"); 8321 8322 // Create new preheader for vector loop. 8323 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8324 DT, LI, nullptr, "vector.ph"); 8325 8326 if (ForEpilogue) { 8327 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8328 DT->getNode(Bypass)->getIDom()) && 8329 "TC check is expected to dominate Bypass"); 8330 8331 // Update dominator for Bypass & LoopExit. 8332 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8333 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8334 8335 LoopBypassBlocks.push_back(TCCheckBlock); 8336 8337 // Save the trip count so we don't have to regenerate it in the 8338 // vec.epilog.iter.check. This is safe to do because the trip count 8339 // generated here dominates the vector epilog iter check. 
8340 EPI.TripCount = Count; 8341 } 8342 8343 ReplaceInstWithInst( 8344 TCCheckBlock->getTerminator(), 8345 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8346 8347 return TCCheckBlock; 8348 } 8349 8350 //===--------------------------------------------------------------------===// 8351 // EpilogueVectorizerEpilogueLoop 8352 //===--------------------------------------------------------------------===// 8353 8354 /// This function is partially responsible for generating the control flow 8355 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8356 BasicBlock * 8357 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8358 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8359 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8360 8361 // Now, compare the remaining count and if there aren't enough iterations to 8362 // execute the vectorized epilogue skip to the scalar part. 8363 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8364 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8365 LoopVectorPreHeader = 8366 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8367 LI, nullptr, "vec.epilog.ph"); 8368 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8369 VecEpilogueIterationCountCheck); 8370 8371 // Adjust the control flow taking the state info from the main loop 8372 // vectorization into account. 8373 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8374 "expected this to be saved from the previous pass."); 8375 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8376 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8377 8378 DT->changeImmediateDominator(LoopVectorPreHeader, 8379 EPI.MainLoopIterationCountCheck); 8380 8381 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8382 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8383 8384 if (EPI.SCEVSafetyCheck) 8385 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8386 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8387 if (EPI.MemSafetyCheck) 8388 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8389 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8390 8391 DT->changeImmediateDominator( 8392 VecEpilogueIterationCountCheck, 8393 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8394 8395 DT->changeImmediateDominator(LoopScalarPreHeader, 8396 EPI.EpilogueIterationCountCheck); 8397 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 8398 8399 // Keep track of bypass blocks, as they feed start values to the induction 8400 // phis in the scalar loop preheader. 8401 if (EPI.SCEVSafetyCheck) 8402 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8403 if (EPI.MemSafetyCheck) 8404 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8405 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8406 8407 // Generate a resume induction for the vector epilogue and put it in the 8408 // vector epilogue preheader 8409 Type *IdxTy = Legal->getWidestInductionType(); 8410 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8411 LoopVectorPreHeader->getFirstNonPHI()); 8412 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8413 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8414 EPI.MainLoopIterationCountCheck); 8415 8416 // Generate the induction variable. 
8417 OldInduction = Legal->getPrimaryInduction(); 8418 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8419 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8420 Value *StartIdx = EPResumeVal; 8421 Induction = 8422 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8423 getDebugLocFromInstOrOperands(OldInduction)); 8424 8425 // Generate induction resume values. These variables save the new starting 8426 // indexes for the scalar loop. They are used to test if there are any tail 8427 // iterations left once the vector loop has completed. 8428 // Note that when the vectorized epilogue is skipped due to the iteration count 8429 // check, the resume value for the induction variable comes from 8430 // the trip count of the main vector loop, hence passing the AdditionalBypass 8431 // argument. 8432 createInductionResumeValues(Lp, CountRoundDown, 8433 {VecEpilogueIterationCountCheck, 8434 EPI.VectorTripCount} /* AdditionalBypass */); 8435 8436 AddRuntimeUnrollDisableMetaData(Lp); 8437 return completeLoopSkeleton(Lp, OrigLoopID); 8438 } 8439 8440 BasicBlock * 8441 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( 8442 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) { 8443 8444 assert(EPI.TripCount && 8445 "Expected trip count to have been saved in the first pass."); 8446 assert( 8447 (!isa<Instruction>(EPI.TripCount) || 8448 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && 8449 "saved trip count does not dominate insertion point."); 8450 Value *TC = EPI.TripCount; 8451 IRBuilder<> Builder(Insert->getTerminator()); 8452 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); 8453 8454 // Generate code to check if the loop's trip count is less than VF * UF of the 8455 // vector epilogue loop. 8456 auto P = 8457 Cost->requiresScalarEpilogue() ?
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8458 8459 Value *CheckMinIters = Builder.CreateICmp( 8460 P, Count, 8461 ConstantInt::get(Count->getType(), 8462 EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF), 8463 "min.epilog.iters.check"); 8464 8465 ReplaceInstWithInst( 8466 Insert->getTerminator(), 8467 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8468 8469 LoopBypassBlocks.push_back(Insert); 8470 return Insert; 8471 } 8472 8473 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8474 LLVM_DEBUG({ 8475 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8476 << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8477 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8478 }); 8479 } 8480 8481 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8482 DEBUG_WITH_TYPE(VerboseDebug, { 8483 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8484 }); 8485 } 8486 8487 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8488 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8489 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8490 bool PredicateAtRangeStart = Predicate(Range.Start); 8491 8492 for (ElementCount TmpVF = Range.Start * 2; 8493 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8494 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8495 Range.End = TmpVF; 8496 break; 8497 } 8498 8499 return PredicateAtRangeStart; 8500 } 8501 8502 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8503 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8504 /// of VF's starting at a given VF and extending it as much as possible. Each 8505 /// vectorization decision can potentially shorten this sub-range during 8506 /// buildVPlan(). 8507 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8508 ElementCount MaxVF) { 8509 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8510 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8511 VFRange SubRange = {VF, MaxVFPlusOne}; 8512 VPlans.push_back(buildVPlan(SubRange)); 8513 VF = SubRange.End; 8514 } 8515 } 8516 8517 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8518 VPlanPtr &Plan) { 8519 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8520 8521 // Look for cached value. 8522 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8523 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8524 if (ECEntryIt != EdgeMaskCache.end()) 8525 return ECEntryIt->second; 8526 8527 VPValue *SrcMask = createBlockInMask(Src, Plan); 8528 8529 // The terminator has to be a branch inst! 8530 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8531 assert(BI && "Unexpected terminator found"); 8532 8533 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8534 return EdgeMaskCache[Edge] = SrcMask; 8535 8536 // If source is an exiting block, we know the exit edge is dynamically dead 8537 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8538 // adding uses of an otherwise potentially dead instruction. 
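  // For a non-exiting source block (the case handled by the rest of this
  // function, below), the edge mask corresponds, per lane, to IR along the
  // lines of the following sketch (names are illustrative only):
  //   %cond.not  = xor i1 %cond, true                           ; only if Dst is the false successor
  //   %edge.mask = select i1 %src.mask, i1 %cond.not, i1 false  ; only if Src has a non-trivial mask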
8539 if (OrigLoop->isLoopExiting(Src)) 8540 return EdgeMaskCache[Edge] = SrcMask; 8541 8542 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8543 assert(EdgeMask && "No Edge Mask found for condition"); 8544 8545 if (BI->getSuccessor(0) != Dst) 8546 EdgeMask = Builder.createNot(EdgeMask); 8547 8548 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8549 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8550 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8551 // The select version does not introduce new UB if SrcMask is false and 8552 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8553 VPValue *False = Plan->getOrAddVPValue( 8554 ConstantInt::getFalse(BI->getCondition()->getType())); 8555 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8556 } 8557 8558 return EdgeMaskCache[Edge] = EdgeMask; 8559 } 8560 8561 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8562 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8563 8564 // Look for cached value. 8565 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8566 if (BCEntryIt != BlockMaskCache.end()) 8567 return BCEntryIt->second; 8568 8569 // All-one mask is modelled as no-mask following the convention for masked 8570 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8571 VPValue *BlockMask = nullptr; 8572 8573 if (OrigLoop->getHeader() == BB) { 8574 if (!CM.blockNeedsPredication(BB)) 8575 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8576 8577 // Create the block in mask as the first non-phi instruction in the block. 8578 VPBuilder::InsertPointGuard Guard(Builder); 8579 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8580 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8581 8582 // Introduce the early-exit compare IV <= BTC to form header block mask. 8583 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8584 // Start by constructing the desired canonical IV. 8585 VPValue *IV = nullptr; 8586 if (Legal->getPrimaryInduction()) 8587 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8588 else { 8589 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8590 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8591 IV = IVRecipe->getVPSingleValue(); 8592 } 8593 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8594 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8595 8596 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8597 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8598 // as a second argument, we only pass the IV here and extract the 8599 // tripcount from the transform state where codegen of the VP instructions 8600 // happen. 8601 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8602 } else { 8603 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8604 } 8605 return BlockMaskCache[BB] = BlockMask; 8606 } 8607 8608 // This is the block mask. We OR all incoming edges. 8609 for (auto *Predecessor : predecessors(BB)) { 8610 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8611 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8612 return BlockMaskCache[BB] = EdgeMask; 8613 8614 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8615 BlockMask = EdgeMask; 8616 continue; 8617 } 8618 8619 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8620 } 8621 8622 return BlockMaskCache[BB] = BlockMask; 8623 } 8624 8625 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8626 ArrayRef<VPValue *> Operands, 8627 VFRange &Range, 8628 VPlanPtr &Plan) { 8629 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8630 "Must be called with either a load or store"); 8631 8632 auto willWiden = [&](ElementCount VF) -> bool { 8633 if (VF.isScalar()) 8634 return false; 8635 LoopVectorizationCostModel::InstWidening Decision = 8636 CM.getWideningDecision(I, VF); 8637 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8638 "CM decision should be taken at this point."); 8639 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8640 return true; 8641 if (CM.isScalarAfterVectorization(I, VF) || 8642 CM.isProfitableToScalarize(I, VF)) 8643 return false; 8644 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8645 }; 8646 8647 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8648 return nullptr; 8649 8650 VPValue *Mask = nullptr; 8651 if (Legal->isMaskRequired(I)) 8652 Mask = createBlockInMask(I->getParent(), Plan); 8653 8654 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8655 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask); 8656 8657 StoreInst *Store = cast<StoreInst>(I); 8658 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8659 Mask); 8660 } 8661 8662 VPWidenIntOrFpInductionRecipe * 8663 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8664 ArrayRef<VPValue *> Operands) const { 8665 // Check if this is an integer or fp induction. If so, build the recipe that 8666 // produces its scalar and vector values. 8667 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8668 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8669 II.getKind() == InductionDescriptor::IK_FpInduction) { 8670 assert(II.getStartValue() == 8671 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8672 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8673 return new VPWidenIntOrFpInductionRecipe( 8674 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8675 } 8676 8677 return nullptr; 8678 } 8679 8680 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8681 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8682 VPlan &Plan) const { 8683 // Optimize the special case where the source is a constant integer 8684 // induction variable. Notice that we can only optimize the 'trunc' case 8685 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8686 // (c) other casts depend on pointer size. 8687 8688 // Determine whether \p K is a truncation based on an induction variable that 8689 // can be optimized. 
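  // For example (illustrative IR, not taken from any particular test): given a
  // primary i64 induction %iv, a use such as
  //   %iv.32 = trunc i64 %iv to i32
  // can be widened directly as a narrower <VF x i32> induction starting from
  // the truncated start value, instead of widening %iv to <VF x i64> and then
  // truncating every element.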
8690 auto isOptimizableIVTruncate = 8691 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8692 return [=](ElementCount VF) -> bool { 8693 return CM.isOptimizableIVTruncate(K, VF); 8694 }; 8695 }; 8696 8697 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8698 isOptimizableIVTruncate(I), Range)) { 8699 8700 InductionDescriptor II = 8701 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8702 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8703 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8704 Start, nullptr, I); 8705 } 8706 return nullptr; 8707 } 8708 8709 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8710 ArrayRef<VPValue *> Operands, 8711 VPlanPtr &Plan) { 8712 // If all incoming values are equal, the incoming VPValue can be used directly 8713 // instead of creating a new VPBlendRecipe. 8714 VPValue *FirstIncoming = Operands[0]; 8715 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8716 return FirstIncoming == Inc; 8717 })) { 8718 return Operands[0]; 8719 } 8720 8721 // We know that all PHIs in non-header blocks are converted into selects, so 8722 // we don't have to worry about the insertion order and we can just use the 8723 // builder. At this point we generate the predication tree. There may be 8724 // duplications since this is a simple recursive scan, but future 8725 // optimizations will clean it up. 8726 SmallVector<VPValue *, 2> OperandsWithMask; 8727 unsigned NumIncoming = Phi->getNumIncomingValues(); 8728 8729 for (unsigned In = 0; In < NumIncoming; In++) { 8730 VPValue *EdgeMask = 8731 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8732 assert((EdgeMask || NumIncoming == 1) && 8733 "Multiple predecessors with one having a full mask"); 8734 OperandsWithMask.push_back(Operands[In]); 8735 if (EdgeMask) 8736 OperandsWithMask.push_back(EdgeMask); 8737 } 8738 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8739 } 8740 8741 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8742 ArrayRef<VPValue *> Operands, 8743 VFRange &Range) const { 8744 8745 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8746 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 8747 Range); 8748 8749 if (IsPredicated) 8750 return nullptr; 8751 8752 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8753 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8754 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8755 ID == Intrinsic::pseudoprobe || 8756 ID == Intrinsic::experimental_noalias_scope_decl)) 8757 return nullptr; 8758 8759 auto willWiden = [&](ElementCount VF) -> bool { 8760 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8761 // The following case may be scalarized depending on the VF. 8762 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8763 // version of the instruction. 8764 // Is it beneficial to perform intrinsic call compared to lib call? 8765 bool NeedToScalarize = false; 8766 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8767 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8768 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8769 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 8770 "Either the intrinsic cost or vector call cost must be valid"); 8771 return UseVectorIntrinsic || !NeedToScalarize; 8772 }; 8773 8774 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8775 return nullptr; 8776 8777 ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands()); 8778 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8779 } 8780 8781 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8782 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8783 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8784 // Instruction should be widened, unless it is scalar after vectorization, 8785 // scalarization is profitable or it is predicated. 8786 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8787 return CM.isScalarAfterVectorization(I, VF) || 8788 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8789 }; 8790 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8791 Range); 8792 } 8793 8794 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8795 ArrayRef<VPValue *> Operands) const { 8796 auto IsVectorizableOpcode = [](unsigned Opcode) { 8797 switch (Opcode) { 8798 case Instruction::Add: 8799 case Instruction::And: 8800 case Instruction::AShr: 8801 case Instruction::BitCast: 8802 case Instruction::FAdd: 8803 case Instruction::FCmp: 8804 case Instruction::FDiv: 8805 case Instruction::FMul: 8806 case Instruction::FNeg: 8807 case Instruction::FPExt: 8808 case Instruction::FPToSI: 8809 case Instruction::FPToUI: 8810 case Instruction::FPTrunc: 8811 case Instruction::FRem: 8812 case Instruction::FSub: 8813 case Instruction::ICmp: 8814 case Instruction::IntToPtr: 8815 case Instruction::LShr: 8816 case Instruction::Mul: 8817 case Instruction::Or: 8818 case Instruction::PtrToInt: 8819 case Instruction::SDiv: 8820 case Instruction::Select: 8821 case Instruction::SExt: 8822 case Instruction::Shl: 8823 case Instruction::SIToFP: 8824 case Instruction::SRem: 8825 case Instruction::Sub: 8826 case Instruction::Trunc: 8827 case Instruction::UDiv: 8828 case Instruction::UIToFP: 8829 case Instruction::URem: 8830 case Instruction::Xor: 8831 case Instruction::ZExt: 8832 return true; 8833 } 8834 return false; 8835 }; 8836 8837 if (!IsVectorizableOpcode(I->getOpcode())) 8838 return nullptr; 8839 8840 // Success: widen this instruction. 
8841 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8842 } 8843 8844 void VPRecipeBuilder::fixHeaderPhis() { 8845 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8846 for (VPWidenPHIRecipe *R : PhisToFix) { 8847 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8848 VPRecipeBase *IncR = 8849 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8850 R->addOperand(IncR->getVPSingleValue()); 8851 } 8852 } 8853 8854 VPBasicBlock *VPRecipeBuilder::handleReplication( 8855 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8856 VPlanPtr &Plan) { 8857 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8858 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8859 Range); 8860 8861 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8862 [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); 8863 8864 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8865 IsUniform, IsPredicated); 8866 setRecipe(I, Recipe); 8867 Plan->addVPValue(I, Recipe); 8868 8869 // Find if I uses a predicated instruction. If so, it will use its scalar 8870 // value. Avoid hoisting the insert-element which packs the scalar value into 8871 // a vector value, as that happens iff all users use the vector value. 8872 for (VPValue *Op : Recipe->operands()) { 8873 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8874 if (!PredR) 8875 continue; 8876 auto *RepR = 8877 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8878 assert(RepR->isPredicated() && 8879 "expected Replicate recipe to be predicated"); 8880 RepR->setAlsoPack(false); 8881 } 8882 8883 // Finalize the recipe for Instr, first if it is not predicated. 8884 if (!IsPredicated) { 8885 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8886 VPBB->appendRecipe(Recipe); 8887 return VPBB; 8888 } 8889 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8890 assert(VPBB->getSuccessors().empty() && 8891 "VPBB has successors when handling predicated replication."); 8892 // Record predicated instructions for above packing optimizations. 8893 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8894 VPBlockUtils::insertBlockAfter(Region, VPBB); 8895 auto *RegSucc = new VPBasicBlock(); 8896 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8897 return RegSucc; 8898 } 8899 8900 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8901 VPRecipeBase *PredRecipe, 8902 VPlanPtr &Plan) { 8903 // Instructions marked for predication are replicated and placed under an 8904 // if-then construct to prevent side-effects. 8905 8906 // Generate recipes to compute the block mask for this region. 8907 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8908 8909 // Build the triangular if-then region. 8910 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8911 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8912 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8913 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8914 auto *PHIRecipe = Instr->getType()->isVoidTy() 8915 ? 
nullptr 8916 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8917 if (PHIRecipe) { 8918 Plan->removeVPValueFor(Instr); 8919 Plan->addVPValue(Instr, PHIRecipe); 8920 } 8921 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8922 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8923 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8924 8925 // Note: first set Entry as region entry and then connect successors starting 8926 // from it in order, to propagate the "parent" of each VPBasicBlock. 8927 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8928 VPBlockUtils::connectBlocks(Pred, Exit); 8929 8930 return Region; 8931 } 8932 8933 VPRecipeOrVPValueTy 8934 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8935 ArrayRef<VPValue *> Operands, 8936 VFRange &Range, VPlanPtr &Plan) { 8937 // First, check for specific widening recipes that deal with calls, memory 8938 // operations, inductions and Phi nodes. 8939 if (auto *CI = dyn_cast<CallInst>(Instr)) 8940 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8941 8942 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8943 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8944 8945 VPRecipeBase *Recipe; 8946 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8947 if (Phi->getParent() != OrigLoop->getHeader()) 8948 return tryToBlend(Phi, Operands, Plan); 8949 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 8950 return toVPRecipeResult(Recipe); 8951 8952 if (Legal->isReductionVariable(Phi)) { 8953 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 8954 assert(RdxDesc.getRecurrenceStartValue() == 8955 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8956 VPValue *StartV = Operands[0]; 8957 8958 auto *PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV); 8959 PhisToFix.push_back(PhiRecipe); 8960 // Record the incoming value from the backedge, so we can add the incoming 8961 // value from the backedge after all recipes have been created. 8962 recordRecipeOf(cast<Instruction>( 8963 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8964 return toVPRecipeResult(PhiRecipe); 8965 } 8966 8967 return toVPRecipeResult(new VPWidenPHIRecipe(Phi)); 8968 } 8969 8970 if (isa<TruncInst>(Instr) && 8971 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8972 Range, *Plan))) 8973 return toVPRecipeResult(Recipe); 8974 8975 if (!shouldWiden(Instr, Range)) 8976 return nullptr; 8977 8978 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8979 return toVPRecipeResult(new VPWidenGEPRecipe( 8980 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8981 8982 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8983 bool InvariantCond = 8984 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8985 return toVPRecipeResult(new VPWidenSelectRecipe( 8986 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8987 } 8988 8989 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8990 } 8991 8992 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8993 ElementCount MaxVF) { 8994 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8995 8996 // Collect instructions from the original loop that will become trivially dead 8997 // in the vectorized loop. We don't need to vectorize these instructions. 
For 8998 // example, original induction update instructions can become dead because we 8999 // separately emit induction "steps" when generating code for the new loop. 9000 // Similarly, we create a new latch condition when setting up the structure 9001 // of the new loop, so the old one can become dead. 9002 SmallPtrSet<Instruction *, 4> DeadInstructions; 9003 collectTriviallyDeadInstructions(DeadInstructions); 9004 9005 // Add assume instructions we need to drop to DeadInstructions, to prevent 9006 // them from being added to the VPlan. 9007 // TODO: We only need to drop assumes in blocks that get flattened. If the 9008 // control flow is preserved, we should keep them. 9009 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 9010 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 9011 9012 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 9013 // Dead instructions do not need sinking. Remove them from SinkAfter. 9014 for (Instruction *I : DeadInstructions) 9015 SinkAfter.erase(I); 9016 9017 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 9018 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 9019 VFRange SubRange = {VF, MaxVFPlusOne}; 9020 VPlans.push_back( 9021 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 9022 VF = SubRange.End; 9023 } 9024 } 9025 9026 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 9027 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 9028 const MapVector<Instruction *, Instruction *> &SinkAfter) { 9029 9030 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 9031 9032 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 9033 9034 // --------------------------------------------------------------------------- 9035 // Pre-construction: record ingredients whose recipes we'll need to further 9036 // process after constructing the initial VPlan. 9037 // --------------------------------------------------------------------------- 9038 9039 // Mark instructions we'll need to sink later and their targets as 9040 // ingredients whose recipe we'll need to record. 9041 for (auto &Entry : SinkAfter) { 9042 RecipeBuilder.recordRecipeOf(Entry.first); 9043 RecipeBuilder.recordRecipeOf(Entry.second); 9044 } 9045 for (auto &Reduction : CM.getInLoopReductionChains()) { 9046 PHINode *Phi = Reduction.first; 9047 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind(); 9048 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9049 9050 RecipeBuilder.recordRecipeOf(Phi); 9051 for (auto &R : ReductionOperations) { 9052 RecipeBuilder.recordRecipeOf(R); 9053 // For min/max reductions, where we have a pair of icmp/select, we also 9054 // need to record the ICmp recipe, so it can be removed later. 9055 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 9056 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 9057 } 9058 } 9059 9060 // For each interleave group which is relevant for this (possibly trimmed) 9061 // Range, add it to the set of groups to be later applied to the VPlan and add 9062 // placeholders for its members' Recipes which we'll be replacing with a 9063 // single VPInterleaveRecipe.
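  // For illustration (the numbers are hypothetical): if applyIG below is false
  // at VF=2 but true from VF=4 onwards, getDecisionAndClampRange returns false
  // for a Range starting at VF=2 and clamps Range.End to 4, so the group is
  // not applied to this VPlan; the next VPlan, built for the sub-range that
  // starts at VF=4, will then include it.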
9064 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 9065 auto applyIG = [IG, this](ElementCount VF) -> bool { 9066 return (VF.isVector() && // Query is illegal for VF == 1 9067 CM.getWideningDecision(IG->getInsertPos(), VF) == 9068 LoopVectorizationCostModel::CM_Interleave); 9069 }; 9070 if (!getDecisionAndClampRange(applyIG, Range)) 9071 continue; 9072 InterleaveGroups.insert(IG); 9073 for (unsigned i = 0; i < IG->getFactor(); i++) 9074 if (Instruction *Member = IG->getMember(i)) 9075 RecipeBuilder.recordRecipeOf(Member); 9076 }; 9077 9078 // --------------------------------------------------------------------------- 9079 // Build initial VPlan: Scan the body of the loop in a topological order to 9080 // visit each basic block after having visited its predecessor basic blocks. 9081 // --------------------------------------------------------------------------- 9082 9083 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 9084 auto Plan = std::make_unique<VPlan>(); 9085 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 9086 Plan->setEntry(VPBB); 9087 9088 // Scan the body of the loop in a topological order to visit each basic block 9089 // after having visited its predecessor basic blocks. 9090 LoopBlocksDFS DFS(OrigLoop); 9091 DFS.perform(LI); 9092 9093 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 9094 // Relevant instructions from basic block BB will be grouped into VPRecipe 9095 // ingredients and fill a new VPBasicBlock. 9096 unsigned VPBBsForBB = 0; 9097 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 9098 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 9099 VPBB = FirstVPBBForBB; 9100 Builder.setInsertPoint(VPBB); 9101 9102 // Introduce each ingredient into VPlan. 9103 // TODO: Model and preserve debug intrinsics in VPlan. 9104 for (Instruction &I : BB->instructionsWithoutDebug()) { 9105 Instruction *Instr = &I; 9106 9107 // First filter out irrelevant instructions, to ensure no recipes are 9108 // built for them. 9109 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9110 continue; 9111 9112 SmallVector<VPValue *, 4> Operands; 9113 auto *Phi = dyn_cast<PHINode>(Instr); 9114 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9115 Operands.push_back(Plan->getOrAddVPValue( 9116 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9117 } else { 9118 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9119 Operands = {OpRange.begin(), OpRange.end()}; 9120 } 9121 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9122 Instr, Operands, Range, Plan)) { 9123 // If Instr can be simplified to an existing VPValue, use it. 9124 if (RecipeOrValue.is<VPValue *>()) { 9125 auto *VPV = RecipeOrValue.get<VPValue *>(); 9126 Plan->addVPValue(Instr, VPV); 9127 // If the re-used value is a recipe, register the recipe for the 9128 // instruction, in case the recipe for Instr needs to be recorded. 9129 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9130 RecipeBuilder.setRecipe(Instr, R); 9131 continue; 9132 } 9133 // Otherwise, add the new recipe. 9134 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9135 for (auto *Def : Recipe->definedValues()) { 9136 auto *UV = Def->getUnderlyingValue(); 9137 Plan->addVPValue(UV, Def); 9138 } 9139 9140 RecipeBuilder.setRecipe(Instr, Recipe); 9141 VPBB->appendRecipe(Recipe); 9142 continue; 9143 } 9144 9145 // Otherwise, if all widening options failed, Instruction is to be 9146 // replicated. This may create a successor for VPBB.
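      // For example (illustrative): a scalarized store that must be predicated
      // ends up in its own replicate region, which at codegen becomes a small
      // triangle of blocks,
      //   pred.store.entry -> { pred.store.if, pred.store.continue }
      // where the entry block branches on the mask lane and the store executes
      // only in pred.store.if.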
9147 VPBasicBlock *NextVPBB = 9148 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9149 if (NextVPBB != VPBB) { 9150 VPBB = NextVPBB; 9151 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9152 : ""); 9153 } 9154 } 9155 } 9156 9157 RecipeBuilder.fixHeaderPhis(); 9158 9159 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 9160 // may also be empty, such as the last one VPBB, reflecting original 9161 // basic-blocks with no recipes. 9162 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 9163 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 9164 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 9165 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 9166 delete PreEntry; 9167 9168 // --------------------------------------------------------------------------- 9169 // Transform initial VPlan: Apply previously taken decisions, in order, to 9170 // bring the VPlan to its final state. 9171 // --------------------------------------------------------------------------- 9172 9173 // Apply Sink-After legal constraints. 9174 for (auto &Entry : SinkAfter) { 9175 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9176 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9177 9178 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9179 auto *Region = 9180 dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9181 if (Region && Region->isReplicator()) 9182 return Region; 9183 return nullptr; 9184 }; 9185 9186 // If the target is in a replication region, make sure to move Sink to the 9187 // block after it, not into the replication region itself. 9188 if (auto *TargetRegion = GetReplicateRegion(Target)) { 9189 assert(TargetRegion->getNumSuccessors() == 1 && "Expected SESE region!"); 9190 assert(!GetReplicateRegion(Sink) && 9191 "cannot sink a region into another region yet"); 9192 VPBasicBlock *NextBlock = 9193 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9194 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9195 continue; 9196 } 9197 9198 auto *SinkRegion = GetReplicateRegion(Sink); 9199 // Unless the sink source is in a replicate region, sink the recipe 9200 // directly. 9201 if (!SinkRegion) { 9202 Sink->moveAfter(Target); 9203 continue; 9204 } 9205 9206 // If the sink source is in a replicate region, we need to move the whole 9207 // replicate region, which should only contain a single recipe in the main 9208 // block. 9209 assert(Sink->getParent()->size() == 1 && 9210 "parent must be a replicator with a single recipe"); 9211 auto *SplitBlock = 9212 Target->getParent()->splitAt(std::next(Target->getIterator())); 9213 9214 auto *Pred = SinkRegion->getSinglePredecessor(); 9215 auto *Succ = SinkRegion->getSingleSuccessor(); 9216 VPBlockUtils::disconnectBlocks(Pred, SinkRegion); 9217 VPBlockUtils::disconnectBlocks(SinkRegion, Succ); 9218 VPBlockUtils::connectBlocks(Pred, Succ); 9219 9220 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9221 9222 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9223 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9224 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9225 if (VPBB == SplitPred) 9226 VPBB = SplitBlock; 9227 } 9228 9229 // Interleave memory: for each Interleave Group we marked earlier as relevant 9230 // for this VPlan, replace the Recipes widening its memory instructions with a 9231 // single VPInterleaveRecipe at its insertion point. 
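  // For example (illustrative IR): two strided loads that form a factor-2
  // interleave group, such as
  //   %even = load i32, i32* %gep.even   ; A[2*i]
  //   %odd  = load i32, i32* %gep.odd    ; A[2*i+1]
  // are emitted as one wide load of 2*VF elements followed by two
  // shufflevectors that de-interleave the even and odd elements, instead of
  // two separate gathers.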
9232 for (auto IG : InterleaveGroups) { 9233 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9234 RecipeBuilder.getRecipe(IG->getInsertPos())); 9235 SmallVector<VPValue *, 4> StoredValues; 9236 for (unsigned i = 0; i < IG->getFactor(); ++i) 9237 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 9238 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 9239 9240 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9241 Recipe->getMask()); 9242 VPIG->insertBefore(Recipe); 9243 unsigned J = 0; 9244 for (unsigned i = 0; i < IG->getFactor(); ++i) 9245 if (Instruction *Member = IG->getMember(i)) { 9246 if (!Member->getType()->isVoidTy()) { 9247 VPValue *OriginalV = Plan->getVPValue(Member); 9248 Plan->removeVPValueFor(Member); 9249 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9250 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9251 J++; 9252 } 9253 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9254 } 9255 } 9256 9257 // Adjust the recipes for any inloop reductions. 9258 if (Range.Start.isVector()) 9259 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 9260 9261 // Finally, if tail is folded by masking, introduce selects between the phi 9262 // and the live-out instruction of each reduction, at the end of the latch. 9263 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 9264 Builder.setInsertPoint(VPBB); 9265 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9266 for (auto &Reduction : Legal->getReductionVars()) { 9267 if (CM.isInLoopReduction(Reduction.first)) 9268 continue; 9269 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 9270 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 9271 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 9272 } 9273 } 9274 9275 VPlanTransforms::sinkScalarOperands(*Plan); 9276 9277 std::string PlanName; 9278 raw_string_ostream RSO(PlanName); 9279 ElementCount VF = Range.Start; 9280 Plan->addVF(VF); 9281 RSO << "Initial VPlan for VF={" << VF; 9282 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9283 Plan->addVF(VF); 9284 RSO << "," << VF; 9285 } 9286 RSO << "},UF>=1"; 9287 RSO.flush(); 9288 Plan->setName(PlanName); 9289 9290 return Plan; 9291 } 9292 9293 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9294 // Outer loop handling: They may require CFG and instruction level 9295 // transformations before even evaluating whether vectorization is profitable. 9296 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9297 // the vectorization pipeline. 9298 assert(!OrigLoop->isInnermost()); 9299 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9300 9301 // Create new empty VPlan 9302 auto Plan = std::make_unique<VPlan>(); 9303 9304 // Build hierarchical CFG 9305 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9306 HCFGBuilder.buildHierarchicalCFG(); 9307 9308 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9309 VF *= 2) 9310 Plan->addVF(VF); 9311 9312 if (EnableVPlanPredication) { 9313 VPlanPredicator VPP(*Plan); 9314 VPP.predicate(); 9315 9316 // Avoid running transformation to recipes until masked code generation in 9317 // VPlan-native path is in place. 
9318 return Plan; 9319 } 9320 9321 SmallPtrSet<Instruction *, 1> DeadInstructions; 9322 VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan, 9323 Legal->getInductionVars(), 9324 DeadInstructions, *PSE.getSE()); 9325 return Plan; 9326 } 9327 9328 // Adjust the recipes for any inloop reductions. The chain of instructions 9329 // leading from the loop exit instr to the phi needs to be converted to 9330 // reductions, with one operand being vector and the other being the scalar 9331 // reduction chain. 9332 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions( 9333 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) { 9334 for (auto &Reduction : CM.getInLoopReductionChains()) { 9335 PHINode *Phi = Reduction.first; 9336 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9337 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9338 9339 // ReductionOperations are ordered top-down from the phi's use to the 9340 // LoopExitValue. We keep track of the previous item (the Chain) to tell 9341 // which of the two operands will remain scalar and which will be reduced. 9342 // For minmax the chain will be the select instructions. 9343 Instruction *Chain = Phi; 9344 for (Instruction *R : ReductionOperations) { 9345 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9346 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9347 9348 VPValue *ChainOp = Plan->getVPValue(Chain); 9349 unsigned FirstOpId; 9350 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9351 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9352 "Expected to replace a VPWidenSelectSC"); 9353 FirstOpId = 1; 9354 } else { 9355 assert(isa<VPWidenRecipe>(WidenRecipe) && 9356 "Expected to replace a VPWidenSC"); 9357 FirstOpId = 0; 9358 } 9359 unsigned VecOpId = 9360 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9361 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9362 9363 auto *CondOp = CM.foldTailByMasking() 9364 ?
RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9365 : nullptr; 9366 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 9367 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9368 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9369 Plan->removeVPValueFor(R); 9370 Plan->addVPValue(R, RedRecipe); 9371 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9372 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9373 WidenRecipe->eraseFromParent(); 9374 9375 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9376 VPRecipeBase *CompareRecipe = 9377 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9378 assert(isa<VPWidenRecipe>(CompareRecipe) && 9379 "Expected to replace a VPWidenSC"); 9380 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9381 "Expected no remaining users"); 9382 CompareRecipe->eraseFromParent(); 9383 } 9384 Chain = R; 9385 } 9386 } 9387 } 9388 9389 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9390 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9391 VPSlotTracker &SlotTracker) const { 9392 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9393 IG->getInsertPos()->printAsOperand(O, false); 9394 O << ", "; 9395 getAddr()->printAsOperand(O, SlotTracker); 9396 VPValue *Mask = getMask(); 9397 if (Mask) { 9398 O << ", "; 9399 Mask->printAsOperand(O, SlotTracker); 9400 } 9401 for (unsigned i = 0; i < IG->getFactor(); ++i) 9402 if (Instruction *I = IG->getMember(i)) 9403 O << "\n" << Indent << " " << VPlanIngredient(I) << " " << i; 9404 } 9405 #endif 9406 9407 void VPWidenCallRecipe::execute(VPTransformState &State) { 9408 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9409 *this, State); 9410 } 9411 9412 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9413 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 9414 this, *this, InvariantCond, State); 9415 } 9416 9417 void VPWidenRecipe::execute(VPTransformState &State) { 9418 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 9419 } 9420 9421 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9422 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 9423 *this, State.UF, State.VF, IsPtrLoopInvariant, 9424 IsIndexLoopInvariant, State); 9425 } 9426 9427 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9428 assert(!State.Instance && "Int or FP induction being replicated."); 9429 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9430 getTruncInst(), getVPValue(0), 9431 getCastValue(), State); 9432 } 9433 9434 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9435 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc, 9436 this, State); 9437 } 9438 9439 void VPBlendRecipe::execute(VPTransformState &State) { 9440 State.ILV->setDebugLocFromInst(State.Builder, Phi); 9441 // We know that all PHIs in non-header blocks are converted into 9442 // selects, so we don't have to worry about the insertion order and we 9443 // can just use the builder. 9444 // At this point we generate the predication tree. There may be 9445 // duplications since this is a simple recursive scan, but future 9446 // optimizations will clean it up. 
9447 9448 unsigned NumIncoming = getNumIncomingValues(); 9449 9450 // Generate a sequence of selects of the form: 9451 // SELECT(Mask3, In3, 9452 // SELECT(Mask2, In2, 9453 // SELECT(Mask1, In1, 9454 // In0))) 9455 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9456 // are essentially undef are taken from In0. 9457 InnerLoopVectorizer::VectorParts Entry(State.UF); 9458 for (unsigned In = 0; In < NumIncoming; ++In) { 9459 for (unsigned Part = 0; Part < State.UF; ++Part) { 9460 // We might have single edge PHIs (blocks) - use an identity 9461 // 'select' for the first PHI operand. 9462 Value *In0 = State.get(getIncomingValue(In), Part); 9463 if (In == 0) 9464 Entry[Part] = In0; // Initialize with the first incoming value. 9465 else { 9466 // Select between the current value and the previous incoming edge 9467 // based on the incoming mask. 9468 Value *Cond = State.get(getMask(In), Part); 9469 Entry[Part] = 9470 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9471 } 9472 } 9473 } 9474 for (unsigned Part = 0; Part < State.UF; ++Part) 9475 State.set(this, Entry[Part], Part); 9476 } 9477 9478 void VPInterleaveRecipe::execute(VPTransformState &State) { 9479 assert(!State.Instance && "Interleave group being replicated."); 9480 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9481 getStoredValues(), getMask()); 9482 } 9483 9484 void VPReductionRecipe::execute(VPTransformState &State) { 9485 assert(!State.Instance && "Reduction being replicated."); 9486 Value *PrevInChain = State.get(getChainOp(), 0); 9487 for (unsigned Part = 0; Part < State.UF; ++Part) { 9488 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9489 bool IsOrdered = useOrderedReductions(*RdxDesc); 9490 Value *NewVecOp = State.get(getVecOp(), Part); 9491 if (VPValue *Cond = getCondOp()) { 9492 Value *NewCond = State.get(Cond, Part); 9493 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9494 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 9495 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9496 Constant *IdenVec = 9497 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 9498 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9499 NewVecOp = Select; 9500 } 9501 Value *NewRed; 9502 Value *NextInChain; 9503 if (IsOrdered) { 9504 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9505 PrevInChain); 9506 PrevInChain = NewRed; 9507 } else { 9508 PrevInChain = State.get(getChainOp(), Part); 9509 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9510 } 9511 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9512 NextInChain = 9513 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9514 NewRed, PrevInChain); 9515 } else if (IsOrdered) 9516 NextInChain = NewRed; 9517 else { 9518 NextInChain = State.Builder.CreateBinOp( 9519 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 9520 PrevInChain); 9521 } 9522 State.set(this, NextInChain, Part); 9523 } 9524 } 9525 9526 void VPReplicateRecipe::execute(VPTransformState &State) { 9527 if (State.Instance) { // Generate a single instance. 9528 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9529 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9530 *State.Instance, IsPredicated, State); 9531 // Insert scalar instance packing it into a vector. 9532 if (AlsoPack && State.VF.isVector()) { 9533 // If we're constructing lane 0, initialize to start from poison. 
9534 if (State.Instance->Lane.isFirstLane()) { 9535 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 9536 Value *Poison = PoisonValue::get( 9537 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9538 State.set(this, Poison, State.Instance->Part); 9539 } 9540 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9541 } 9542 return; 9543 } 9544 9545 // Generate scalar instances for all VF lanes of all UF parts, unless the 9546 // instruction is uniform, in which case generate only the first lane for each 9547 // of the UF parts. 9548 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); 9549 assert((!State.VF.isScalable() || IsUniform) && 9550 "Can't scalarize a scalable vector"); 9551 for (unsigned Part = 0; Part < State.UF; ++Part) 9552 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9553 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9554 VPIteration(Part, Lane), IsPredicated, 9555 State); 9556 } 9557 9558 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9559 assert(State.Instance && "Branch on Mask works only on single instance."); 9560 9561 unsigned Part = State.Instance->Part; 9562 unsigned Lane = State.Instance->Lane.getKnownLane(); 9563 9564 Value *ConditionBit = nullptr; 9565 VPValue *BlockInMask = getMask(); 9566 if (BlockInMask) { 9567 ConditionBit = State.get(BlockInMask, Part); 9568 if (ConditionBit->getType()->isVectorTy()) 9569 ConditionBit = State.Builder.CreateExtractElement( 9570 ConditionBit, State.Builder.getInt32(Lane)); 9571 } else // Block in mask is all-one. 9572 ConditionBit = State.Builder.getTrue(); 9573 9574 // Replace the temporary unreachable terminator with a new conditional branch, 9575 // whose two destinations will be set later when they are created. 9576 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9577 assert(isa<UnreachableInst>(CurrentTerminator) && 9578 "Expected to replace unreachable terminator with conditional branch."); 9579 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9580 CondBr->setSuccessor(0, nullptr); 9581 ReplaceInstWithInst(CurrentTerminator, CondBr); 9582 } 9583 9584 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9585 assert(State.Instance && "Predicated instruction PHI works per instance."); 9586 Instruction *ScalarPredInst = 9587 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9588 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9589 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9590 assert(PredicatingBB && "Predicated block has no single predecessor."); 9591 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9592 "operand must be VPReplicateRecipe"); 9593 9594 // By current pack/unpack logic we need to generate only a single phi node: if 9595 // a vector value for the predicated instruction exists at this point it means 9596 // the instruction has vector users only, and a phi for the vector value is 9597 // needed. In this case the recipe of the predicated instruction is marked to 9598 // also do that packing, thereby "hoisting" the insert-element sequence. 9599 // Otherwise, a phi node for the scalar value is needed.
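  // For illustration (block and value names are hypothetical), the
  // vector-value case produces IR resembling:
  //   pred.load.continue:
  //     %vec.phi = phi <4 x i32> [ %vec.prev, %pred.load.entry ],
  //                              [ %vec.ins,  %pred.load.if ]
  // where %vec.ins is %vec.prev with the newly produced scalar inserted at the
  // current lane.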
9600 unsigned Part = State.Instance->Part; 9601 if (State.hasVectorValue(getOperand(0), Part)) { 9602 Value *VectorValue = State.get(getOperand(0), Part); 9603 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9604 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9605 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9606 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9607 if (State.hasVectorValue(this, Part)) 9608 State.reset(this, VPhi, Part); 9609 else 9610 State.set(this, VPhi, Part); 9611 // NOTE: Currently we need to update the value of the operand, so the next 9612 // predicated iteration inserts its generated value in the correct vector. 9613 State.reset(getOperand(0), VPhi, Part); 9614 } else { 9615 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9616 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9617 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9618 PredicatingBB); 9619 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9620 if (State.hasScalarValue(this, *State.Instance)) 9621 State.reset(this, Phi, *State.Instance); 9622 else 9623 State.set(this, Phi, *State.Instance); 9624 // NOTE: Currently we need to update the value of the operand, so the next 9625 // predicated iteration inserts its generated value in the correct vector. 9626 State.reset(getOperand(0), Phi, *State.Instance); 9627 } 9628 } 9629 9630 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9631 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9632 State.ILV->vectorizeMemoryInstruction( 9633 &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), 9634 StoredValue, getMask()); 9635 } 9636 9637 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9638 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9639 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9640 // for predication. 9641 static ScalarEpilogueLowering getScalarEpilogueLowering( 9642 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9643 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9644 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9645 LoopVectorizationLegality &LVL) { 9646 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9647 // don't look at hints or options, and don't request a scalar epilogue. 9648 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9649 // LoopAccessInfo (due to code dependency and not being able to reliably get 9650 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9651 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9652 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9653 // back to the old way and vectorize with versioning when forced. See D81345.) 
9654 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9655 PGSOQueryType::IRPass) && 9656 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9657 return CM_ScalarEpilogueNotAllowedOptSize; 9658 9659 // 2) If set, obey the directives 9660 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9661 switch (PreferPredicateOverEpilogue) { 9662 case PreferPredicateTy::ScalarEpilogue: 9663 return CM_ScalarEpilogueAllowed; 9664 case PreferPredicateTy::PredicateElseScalarEpilogue: 9665 return CM_ScalarEpilogueNotNeededUsePredicate; 9666 case PreferPredicateTy::PredicateOrDontVectorize: 9667 return CM_ScalarEpilogueNotAllowedUsePredicate; 9668 }; 9669 } 9670 9671 // 3) If set, obey the hints 9672 switch (Hints.getPredicate()) { 9673 case LoopVectorizeHints::FK_Enabled: 9674 return CM_ScalarEpilogueNotNeededUsePredicate; 9675 case LoopVectorizeHints::FK_Disabled: 9676 return CM_ScalarEpilogueAllowed; 9677 }; 9678 9679 // 4) if the TTI hook indicates this is profitable, request predication. 9680 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9681 LVL.getLAI())) 9682 return CM_ScalarEpilogueNotNeededUsePredicate; 9683 9684 return CM_ScalarEpilogueAllowed; 9685 } 9686 9687 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9688 // If Values have been set for this Def return the one relevant for \p Part. 9689 if (hasVectorValue(Def, Part)) 9690 return Data.PerPartOutput[Def][Part]; 9691 9692 if (!hasScalarValue(Def, {Part, 0})) { 9693 Value *IRV = Def->getLiveInIRValue(); 9694 Value *B = ILV->getBroadcastInstrs(IRV); 9695 set(Def, B, Part); 9696 return B; 9697 } 9698 9699 Value *ScalarValue = get(Def, {Part, 0}); 9700 // If we aren't vectorizing, we can just copy the scalar map values over 9701 // to the vector map. 9702 if (VF.isScalar()) { 9703 set(Def, ScalarValue, Part); 9704 return ScalarValue; 9705 } 9706 9707 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9708 bool IsUniform = RepR && RepR->isUniform(); 9709 9710 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9711 // Check if there is a scalar value for the selected lane. 9712 if (!hasScalarValue(Def, {Part, LastLane})) { 9713 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 9714 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 9715 "unexpected recipe found to be invariant"); 9716 IsUniform = true; 9717 LastLane = 0; 9718 } 9719 9720 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 9721 9722 // Set the insert point after the last scalarized instruction. This 9723 // ensures the insertelement sequence will directly follow the scalar 9724 // definitions. 9725 auto OldIP = Builder.saveIP(); 9726 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 9727 Builder.SetInsertPoint(&*NewIP); 9728 9729 // However, if we are vectorizing, we need to construct the vector values. 9730 // If the value is known to be uniform after vectorization, we can just 9731 // broadcast the scalar value corresponding to lane zero for each unroll 9732 // iteration. Otherwise, we construct the vector values using 9733 // insertelement instructions. Since the resulting vectors are stored in 9734 // State, we will only generate the insertelements once. 9735 Value *VectorValue = nullptr; 9736 if (IsUniform) { 9737 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 9738 set(Def, VectorValue, Part); 9739 } else { 9740 // Initialize packing with insertelements to start from undef. 
    assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
    Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Undef, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension: the computation was then performed in a wider type, so the
// vectorized loop pays a performance penalty from the conversion overhead and
// from the change in effective vector width.
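// A hypothetical source loop that would trigger the remark (example added for
// illustration; the double multiplier forces an fpext of the float load, and
// the chain store -> fptrunc -> fmul -> fpext is what the walk below finds):
//
//   void scale(float *A, double S, int N) {
//     for (int i = 0; i < N; ++i)
//       A[i] = A[i] * S;
//   }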
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
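  // (Illustrative: a loop such as "for (i = 1; i < N; ++i) A[i] = A[i-1] + 1"
  // carries a backward dependence of distance one between iterations, so the
  // legality check below rejects it; iterations that are independent,
  // possibly after adding runtime checks, are typically required.)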
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem like it can be correct -- what if the loop
  // is an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
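  // (Background note, added for illustration: the TTI hook below covers
  // targets whose vector FP semantics differ from their scalar ones -- for
  // example, SIMD units that flush denormals to zero -- where vectorizing FP
  // code marked potentially unsafe could silently change results.)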
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to not
    // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
    LVP.setBestPlan(VF.Width, IC);

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not worthwhile to vectorize the loop, then
      // interleave it.
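      // (Illustrative: InnerLoopUnroller is an InnerLoopVectorizer operating
      // with VF = 1, so with IC = 2 each "wide" iteration executes two copies
      // of the scalar body, exposing ILP without emitting vector
      // instructions.)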
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);
      LVP.executePlan(Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *worthwhile* to vectorize the loop, then do
      // it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar epilogue
        // to be vectorized by executing the plan (potentially with a different
        // factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                          EpilogueVF.Width.getKnownMinValue(),
                                          1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
        LVP.executePlan(MainILV, DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);
        LVP.executePlan(EpilogILV, DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);
        LVP.executePlan(LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that is
        // rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
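    // (For reference: setAlreadyVectorized attaches loop metadata along the
    // lines of !{!"llvm.loop.isvectorized", i32 1} to the loop ID, which the
    // hint processing at the top of processLoop recognizes and skips.)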
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
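// Usage sketch (illustrative, not part of the original file): the pass can be
// exercised directly with opt, e.g.
//   opt -passes=loop-vectorize -S input.ll
// and the remarks discussed above surfaced with
//   opt -passes=loop-vectorize -pass-remarks=loop-vectorize \
//       -pass-remarks-missed=loop-vectorize \
//       -pass-remarks-analysis=loop-vectorize -S input.ll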