//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one. An illustrative sketch of this
// transformation appears at the end of this header comment.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
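//
// Illustrative sketch of the widening transformation (not taken from the
// sources above). For a loop that adds a constant to every element of an
// array, vectorizing with a vectorization factor (VF) of 4 conceptually
// produces the code below. The arrays A, B and the bound n are hypothetical
// C placeholders used purely for exposition; the real transformation operates
// on LLVM IR and emits a single wide vector add per vector iteration.
//
//   // Original scalar loop:
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
//
//   // After widening with VF = 4 the induction variable advances by 4 and
//   // each vector iteration processes four elements at once:
//   int i = 0;
//   for (; i + 4 <= n; i += 4)
//     for (int j = 0; j < 4; ++j) // lowered to one wide vector add
//       A[i + j] = B[i + j] + 42;
//
//   // The remaining n % 4 iterations run in the scalar epilogue loop:
//   for (; i < n; ++i)
//     A[i] = B[i] + 42;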
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. "
             "Mostly useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

cl::opt<bool> EnableStrictReductions(
    "enable-strict-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorisation of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead of
  /// \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize the interleaved access group \p Group with the base
  /// address given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  ///   (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()).
  /// In the latter case \p EntryVal is a TruncInst and we must not record
  /// anything for that IV, but it's error-prone to expect callers of this
  /// routine to care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  /// Returns the block containing the checks or nullptr if no checks have
  /// been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// The middle block between the vector and the scalar loops.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile-guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning up the checks if vectorization turns out to be unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: "
                          << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step,
                              ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I)
            << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons.
/// In this class we mainly attempt to predict the expected speedups or
/// slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Set up cost-based decisions for the user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instructions may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usage of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
1357 CM_Interleave, 1358 CM_GatherScatter, 1359 CM_Scalarize 1360 }; 1361 1362 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1363 /// instruction \p I and vector width \p VF. 1364 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1365 InstructionCost Cost) { 1366 assert(VF.isVector() && "Expected VF >=2"); 1367 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1368 } 1369 1370 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1371 /// interleaving group \p Grp and vector width \p VF. 1372 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1373 ElementCount VF, InstWidening W, 1374 InstructionCost Cost) { 1375 assert(VF.isVector() && "Expected VF >=2"); 1376 /// Broadcast this decision to all instructions inside the group. 1377 /// But the cost will be assigned to one instruction only. 1378 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1379 if (auto *I = Grp->getMember(i)) { 1380 if (Grp->getInsertPos() == I) 1381 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1382 else 1383 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1384 } 1385 } 1386 } 1387 1388 /// Return the cost model decision for the given instruction \p I and vector 1389 /// width \p VF. Return CM_Unknown if this instruction did not pass 1390 /// through the cost modeling. 1391 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1392 assert(VF.isVector() && "Expected VF to be a vector VF"); 1393 // Cost model is not run in the VPlan-native path - return conservative 1394 // result until this changes. 1395 if (EnableVPlanNativePath) 1396 return CM_GatherScatter; 1397 1398 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1399 auto Itr = WideningDecisions.find(InstOnVF); 1400 if (Itr == WideningDecisions.end()) 1401 return CM_Unknown; 1402 return Itr->second.first; 1403 } 1404 1405 /// Return the vectorization cost for the given instruction \p I and vector 1406 /// width \p VF. 1407 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1408 assert(VF.isVector() && "Expected VF >=2"); 1409 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1410 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1411 "The cost is not calculated"); 1412 return WideningDecisions[InstOnVF].second; 1413 } 1414 1415 /// Return True if instruction \p I is an optimizable truncate whose operand 1416 /// is an induction variable. Such a truncate will be removed by adding a new 1417 /// induction variable with the destination type. 1418 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1419 // If the instruction is not a truncate, return false. 1420 auto *Trunc = dyn_cast<TruncInst>(I); 1421 if (!Trunc) 1422 return false; 1423 1424 // Get the source and destination types of the truncate. 1425 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1426 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1427 1428 // If the truncate is free for the given types, return false. Replacing a 1429 // free truncate with an induction variable would add an induction variable 1430 // update instruction to each iteration of the loop. We exclude from this 1431 // check the primary induction variable since it will need an update 1432 // instruction regardless.
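// A minimal illustrative example (hypothetical source, not from this file):
// for a loop such as
//   for (int64_t i = 0; i < n; ++i)
//     A[i] = (int32_t)i;
// the trunc of the 64-bit induction to 32 bits can be replaced by a second,
// 32-bit induction variable, so no vector truncate is needed in the widened
// loop; this routine detects exactly that pattern.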
1433 Value *Op = Trunc->getOperand(0); 1434 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1435 return false; 1436 1437 // If the truncated value is not an induction variable, return false. 1438 return Legal->isInductionPhi(Op); 1439 } 1440 1441 /// Collects the instructions to scalarize for each predicated instruction in 1442 /// the loop. 1443 void collectInstsToScalarize(ElementCount VF); 1444 1445 /// Collect Uniform and Scalar values for the given \p VF. 1446 /// The sets depend on CM decision for Load/Store instructions 1447 /// that may be vectorized as interleave, gather-scatter or scalarized. 1448 void collectUniformsAndScalars(ElementCount VF) { 1449 // Do the analysis once. 1450 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1451 return; 1452 setCostBasedWideningDecision(VF); 1453 collectLoopUniforms(VF); 1454 collectLoopScalars(VF); 1455 } 1456 1457 /// Returns true if the target machine supports masked store operation 1458 /// for the given \p DataType and kind of access to \p Ptr. 1459 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1460 return Legal->isConsecutivePtr(Ptr) && 1461 TTI.isLegalMaskedStore(DataType, Alignment); 1462 } 1463 1464 /// Returns true if the target machine supports masked load operation 1465 /// for the given \p DataType and kind of access to \p Ptr. 1466 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1467 return Legal->isConsecutivePtr(Ptr) && 1468 TTI.isLegalMaskedLoad(DataType, Alignment); 1469 } 1470 1471 /// Returns true if the target machine supports masked scatter operation 1472 /// for the given \p DataType. 1473 bool isLegalMaskedScatter(Type *DataType, Align Alignment) const { 1474 return TTI.isLegalMaskedScatter(DataType, Alignment); 1475 } 1476 1477 /// Returns true if the target machine supports masked gather operation 1478 /// for the given \p DataType. 1479 bool isLegalMaskedGather(Type *DataType, Align Alignment) const { 1480 return TTI.isLegalMaskedGather(DataType, Alignment); 1481 } 1482 1483 /// Returns true if the target machine can represent \p V as a masked gather 1484 /// or scatter operation. 1485 bool isLegalGatherOrScatter(Value *V) { 1486 bool LI = isa<LoadInst>(V); 1487 bool SI = isa<StoreInst>(V); 1488 if (!LI && !SI) 1489 return false; 1490 auto *Ty = getMemInstValueType(V); 1491 Align Align = getLoadStoreAlignment(V); 1492 return (LI && isLegalMaskedGather(Ty, Align)) || 1493 (SI && isLegalMaskedScatter(Ty, Align)); 1494 } 1495 1496 /// Returns true if the target machine supports all of the reduction 1497 /// variables found for the given VF. 1498 bool canVectorizeReductions(ElementCount VF) { 1499 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1500 RecurrenceDescriptor RdxDesc = Reduction.second; 1501 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1502 })); 1503 } 1504 1505 /// Returns true if \p I is an instruction that will be scalarized with 1506 /// predication. Such instructions include conditional stores and 1507 /// instructions that may divide by zero. 1508 /// If a non-zero VF has been calculated, we check if I will be scalarized with 1509 /// predication for that VF. 1510 bool 1511 isScalarWithPredication(Instruction *I, 1512 ElementCount VF = ElementCount::getFixed(1)) const; 1513 1514 // Returns true if \p I is an instruction that will be predicated either 1515 // through scalar predication or masked load/store or masked gather/scatter.
1516 // Superset of instructions that return true for isScalarWithPredication. 1517 bool isPredicatedInst(Instruction *I) { 1518 if (!blockNeedsPredication(I->getParent())) 1519 return false; 1520 // Loads and stores that need some form of masked operation are predicated 1521 // instructions. 1522 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1523 return Legal->isMaskRequired(I); 1524 return isScalarWithPredication(I); 1525 } 1526 1527 /// Returns true if \p I is a memory instruction with consecutive memory 1528 /// access that can be widened. 1529 bool 1530 memoryInstructionCanBeWidened(Instruction *I, 1531 ElementCount VF = ElementCount::getFixed(1)); 1532 1533 /// Returns true if \p I is a memory instruction in an interleaved-group 1534 /// of memory accesses that can be vectorized with wide vector loads/stores 1535 /// and shuffles. 1536 bool 1537 interleavedAccessCanBeWidened(Instruction *I, 1538 ElementCount VF = ElementCount::getFixed(1)); 1539 1540 /// Check if \p Instr belongs to any interleaved access group. 1541 bool isAccessInterleaved(Instruction *Instr) { 1542 return InterleaveInfo.isInterleaved(Instr); 1543 } 1544 1545 /// Get the interleaved access group that \p Instr belongs to. 1546 const InterleaveGroup<Instruction> * 1547 getInterleavedAccessGroup(Instruction *Instr) { 1548 return InterleaveInfo.getInterleaveGroup(Instr); 1549 } 1550 1551 /// Returns true if we're required to use a scalar epilogue for at least 1552 /// the final iteration of the original loop. 1553 bool requiresScalarEpilogue() const { 1554 if (!isScalarEpilogueAllowed()) 1555 return false; 1556 // If we might exit from anywhere but the latch, must run the exiting 1557 // iteration in scalar form. 1558 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1559 return true; 1560 return InterleaveInfo.requiresScalarEpilogue(); 1561 } 1562 1563 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1564 /// loop hint annotation. 1565 bool isScalarEpilogueAllowed() const { 1566 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1567 } 1568 1569 /// Returns true if all loop blocks should be masked to fold tail loop. 1570 bool foldTailByMasking() const { return FoldTailByMasking; } 1571 1572 bool blockNeedsPredication(BasicBlock *BB) const { 1573 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1574 } 1575 1576 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1577 /// nodes to the chain of instructions representing the reductions. Uses a 1578 /// MapVector to ensure deterministic iteration order. 1579 using ReductionChainMap = 1580 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1581 1582 /// Return the chain of instructions representing an inloop reduction. 1583 const ReductionChainMap &getInLoopReductionChains() const { 1584 return InLoopReductionChains; 1585 } 1586 1587 /// Returns true if the Phi is part of an inloop reduction. 1588 bool isInLoopReduction(PHINode *Phi) const { 1589 return InLoopReductionChains.count(Phi); 1590 } 1591 1592 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1593 /// with factor VF. Return the cost of the instruction, including 1594 /// scalarization overhead if it's needed. 1595 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1596 1597 /// Estimate cost of a call instruction CI if it were vectorized with factor 1598 /// VF. Return the cost of the instruction, including scalarization overhead 1599 /// if it's needed. 
The flag NeedToScalarize shows if the call needs to be 1600 /// scalarized - 1601 /// i.e. either vector version isn't available, or is too expensive. 1602 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1603 bool &NeedToScalarize) const; 1604 1605 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1606 /// that of B. 1607 bool isMoreProfitable(const VectorizationFactor &A, 1608 const VectorizationFactor &B) const; 1609 1610 /// Invalidates decisions already taken by the cost model. 1611 void invalidateCostModelingDecisions() { 1612 WideningDecisions.clear(); 1613 Uniforms.clear(); 1614 Scalars.clear(); 1615 } 1616 1617 private: 1618 unsigned NumPredStores = 0; 1619 1620 /// \return An upper bound for the vectorization factor, a power-of-2 larger 1621 /// than zero. One is returned if vectorization should best be avoided due 1622 /// to cost. 1623 ElementCount computeFeasibleMaxVF(unsigned ConstTripCount, 1624 ElementCount UserVF); 1625 1626 /// The vectorization cost is a combination of the cost itself and a boolean 1627 /// indicating whether any of the contributing operations will actually 1628 /// operate on 1629 /// vector values after type legalization in the backend. If this latter value 1630 /// is 1631 /// false, then all operations will be scalarized (i.e. no vectorization has 1632 /// actually taken place). 1633 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1634 1635 /// Returns the expected execution cost. The unit of the cost does 1636 /// not matter because we use the 'cost' units to compare different 1637 /// vector widths. The cost that is returned is *not* normalized by 1638 /// the factor width. 1639 VectorizationCostTy expectedCost(ElementCount VF); 1640 1641 /// Returns the execution time cost of an instruction for a given vector 1642 /// width. Vector width of one means scalar. 1643 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1644 1645 /// The cost-computation logic from getInstructionCost which provides 1646 /// the vector type as an output parameter. 1647 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1648 Type *&VectorTy); 1649 1650 /// Return the cost of instructions in an inloop reduction pattern, if I is 1651 /// part of that pattern. 1652 InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF, 1653 Type *VectorTy, 1654 TTI::TargetCostKind CostKind); 1655 1656 /// Calculate vectorization cost of memory instruction \p I. 1657 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1658 1659 /// The cost computation for scalarized memory instruction. 1660 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1661 1662 /// The cost computation for interleaving group of memory instructions. 1663 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1664 1665 /// The cost computation for Gather/Scatter instruction. 1666 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1667 1668 /// The cost computation for widening instruction \p I with consecutive 1669 /// memory access. 1670 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1671 1672 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1673 /// Load: scalar load + broadcast. 1674 /// Store: scalar store + (loop invariant value stored? 
0 : extract of last 1675 /// element) 1676 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1677 1678 /// Estimate the overhead of scalarizing an instruction. This is a 1679 /// convenience wrapper for the type-based getScalarizationOverhead API. 1680 InstructionCost getScalarizationOverhead(Instruction *I, 1681 ElementCount VF) const; 1682 1683 /// Returns whether the instruction is a load or store and will be emitted 1684 /// as a vector operation. 1685 bool isConsecutiveLoadOrStore(Instruction *I); 1686 1687 /// Returns true if an artificially high cost for emulated masked memrefs 1688 /// should be used. 1689 bool useEmulatedMaskMemRefHack(Instruction *I); 1690 1691 /// Map of scalar integer values to the smallest bitwidth they can be legally 1692 /// represented as. The vector equivalents of these values should be truncated 1693 /// to this type. 1694 MapVector<Instruction *, uint64_t> MinBWs; 1695 1696 /// A type representing the costs for instructions if they were to be 1697 /// scalarized rather than vectorized. The entries are Instruction-Cost 1698 /// pairs. 1699 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; 1700 1701 /// A set containing all BasicBlocks that are known to be present after 1702 /// vectorization as predicated blocks. 1703 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1704 1705 /// Records whether it is allowed to have the original scalar loop execute at 1706 /// least once. This may be needed as a fallback loop in case runtime 1707 /// aliasing/dependence checks fail, or to handle the tail/remainder 1708 /// iterations when the trip count is unknown or doesn't divide by the VF, 1709 /// or as a peel-loop to handle gaps in interleave-groups. 1710 /// Under optsize and when the trip count is very small we don't allow any 1711 /// iterations to execute in the scalar loop. 1712 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1713 1714 /// All blocks of loop are to be masked to fold tail of scalar iterations. 1715 bool FoldTailByMasking = false; 1716 1717 /// A map holding scalar costs for different vectorization factors. The 1718 /// presence of a cost for an instruction in the mapping indicates that the 1719 /// instruction will be scalarized when vectorizing with the associated 1720 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1721 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1722 1723 /// Holds the instructions known to be uniform after vectorization. 1724 /// The data is collected per VF. 1725 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1726 1727 /// Holds the instructions known to be scalar after vectorization. 1728 /// The data is collected per VF. 1729 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1730 1731 /// Holds the instructions (address computations) that are forced to be 1732 /// scalarized. 1733 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1734 1735 /// PHINodes of the reductions that should be expanded in-loop along with 1736 /// their associated chains of reduction operations, in program order from top 1737 /// (PHI) to bottom. 1738 ReductionChainMap InLoopReductionChains; 1739 1740 /// A map of inloop reduction operations and their immediate chain operand. 1741 /// FIXME: This can be removed once reductions can be costed correctly in 1742 /// vplan. This was added to allow quick lookup to the inloop operations, 1743 /// without having to loop through InLoopReductionChains.
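// Illustrative example (hypothetical IR names): for an in-loop reduction
//   %red = phi i32 [ 0, %ph ], [ %add2, %loop ]
//   %add1 = add i32 %red, %a
//   %add2 = add i32 %add1, %b
// InLoopReductionChains maps %red -> {%add1, %add2}, while the map below
// holds %add1 -> %red and %add2 -> %add1, i.e. each reduction operation's
// immediate chain operand.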
1744 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1745 1746 /// Returns the expected difference in cost from scalarizing the expression 1747 /// feeding a predicated instruction \p PredInst. The instructions to 1748 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1749 /// non-negative return value implies the expression will be scalarized. 1750 /// Currently, only single-use chains are considered for scalarization. 1751 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1752 ElementCount VF); 1753 1754 /// Collect the instructions that are uniform after vectorization. An 1755 /// instruction is uniform if we represent it with a single scalar value in 1756 /// the vectorized loop corresponding to each vector iteration. Examples of 1757 /// uniform instructions include pointer operands of consecutive or 1758 /// interleaved memory accesses. Note that although uniformity implies an 1759 /// instruction will be scalar, the reverse is not true. In general, a 1760 /// scalarized instruction will be represented by VF scalar values in the 1761 /// vectorized loop, each corresponding to an iteration of the original 1762 /// scalar loop. 1763 void collectLoopUniforms(ElementCount VF); 1764 1765 /// Collect the instructions that are scalar after vectorization. An 1766 /// instruction is scalar if it is known to be uniform or will be scalarized 1767 /// during vectorization. Non-uniform scalarized instructions will be 1768 /// represented by VF values in the vectorized loop, each corresponding to an 1769 /// iteration of the original scalar loop. 1770 void collectLoopScalars(ElementCount VF); 1771 1772 /// Keeps cost model vectorization decision and cost for instructions. 1773 /// Right now it is used for memory instructions only. 1774 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1775 std::pair<InstWidening, InstructionCost>>; 1776 1777 DecisionList WideningDecisions; 1778 1779 /// Returns true if \p V is expected to be vectorized and it needs to be 1780 /// extracted. 1781 bool needsExtract(Value *V, ElementCount VF) const { 1782 Instruction *I = dyn_cast<Instruction>(V); 1783 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1784 TheLoop->isLoopInvariant(I)) 1785 return false; 1786 1787 // Assume we can vectorize V (and hence we need extraction) if the 1788 // scalars are not computed yet. This can happen, because it is called 1789 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1790 // the scalars are collected. That should be a safe assumption in most 1791 // cases, because we check if the operands have vectorizable types 1792 // beforehand in LoopVectorizationLegality. 1793 return Scalars.find(VF) == Scalars.end() || 1794 !isScalarAfterVectorization(I, VF); 1795 }; 1796 1797 /// Returns a range containing only operands needing to be extracted. 1798 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1799 ElementCount VF) const { 1800 return SmallVector<Value *, 4>(make_filter_range( 1801 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1802 } 1803 1804 /// Determines if we have the infrastructure to vectorize loop \p L and its 1805 /// epilogue, assuming the main loop is vectorized by \p VF. 1806 bool isCandidateForEpilogueVectorization(const Loop &L, 1807 const ElementCount VF) const; 1808 1809 /// Returns true if epilogue vectorization is considered profitable, and 1810 /// false otherwise. 
1811 /// \p VF is the vectorization factor chosen for the original loop. 1812 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1813 1814 public: 1815 /// The loop that we evaluate. 1816 Loop *TheLoop; 1817 1818 /// Predicated scalar evolution analysis. 1819 PredicatedScalarEvolution &PSE; 1820 1821 /// Loop Info analysis. 1822 LoopInfo *LI; 1823 1824 /// Vectorization legality. 1825 LoopVectorizationLegality *Legal; 1826 1827 /// Vector target information. 1828 const TargetTransformInfo &TTI; 1829 1830 /// Target Library Info. 1831 const TargetLibraryInfo *TLI; 1832 1833 /// Demanded bits analysis. 1834 DemandedBits *DB; 1835 1836 /// Assumption cache. 1837 AssumptionCache *AC; 1838 1839 /// Interface to emit optimization remarks. 1840 OptimizationRemarkEmitter *ORE; 1841 1842 const Function *TheFunction; 1843 1844 /// Loop Vectorize Hint. 1845 const LoopVectorizeHints *Hints; 1846 1847 /// The interleave access information contains groups of interleaved accesses 1848 /// with the same stride and close to each other. 1849 InterleavedAccessInfo &InterleaveInfo; 1850 1851 /// Values to ignore in the cost model. 1852 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1853 1854 /// Values to ignore in the cost model when VF > 1. 1855 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1856 1857 /// Profitable vector factors. 1858 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1859 }; 1860 } // end namespace llvm 1861 1862 /// Helper struct to manage generating runtime checks for vectorization. 1863 /// 1864 /// The runtime checks are created up-front in temporary blocks to allow better 1865 /// estimating the cost and un-linked from the existing IR. After deciding to 1866 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1867 /// temporary blocks are completely removed. 1868 class GeneratedRTChecks { 1869 /// Basic block which contains the generated SCEV checks, if any. 1870 BasicBlock *SCEVCheckBlock = nullptr; 1871 1872 /// The value representing the result of the generated SCEV checks. If it is 1873 /// nullptr, either no SCEV checks have been generated or they have been used. 1874 Value *SCEVCheckCond = nullptr; 1875 1876 /// Basic block which contains the generated memory runtime checks, if any. 1877 BasicBlock *MemCheckBlock = nullptr; 1878 1879 /// The value representing the result of the generated memory runtime checks. 1880 /// If it is nullptr, either no memory runtime checks have been generated or 1881 /// they have been used. 1882 Instruction *MemRuntimeCheckCond = nullptr; 1883 1884 DominatorTree *DT; 1885 LoopInfo *LI; 1886 1887 SCEVExpander SCEVExp; 1888 SCEVExpander MemCheckExp; 1889 1890 public: 1891 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1892 const DataLayout &DL) 1893 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1894 MemCheckExp(SE, DL, "scev.check") {} 1895 1896 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1897 /// accurately estimate the cost of the runtime checks. The blocks are 1898 /// un-linked from the IR and is added back during vector code generation. If 1899 /// there is no vector code generation, the check blocks are removed 1900 /// completely. 
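// A rough usage sketch (hypothetical caller code; names such as Bypass,
// LoopVectorPreHeader and LoopExitBlock are assumed): the checks are created
// eagerly so their cost can be estimated, and only re-linked into the IR if
// the loop is actually vectorized:
//   GeneratedRTChecks Checks(*SE, DT, LI, F->getParent()->getDataLayout());
//   Checks.Create(L, *Legal->getLAI(), PSE.getUnionPredicate());
//   // ... run the cost model and decide to vectorize ...
//   Checks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
//   Checks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
// If the decision is not to vectorize, the destructor erases the unused
// temporary check blocks.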
1901 void Create(Loop *L, const LoopAccessInfo &LAI, 1902 const SCEVUnionPredicate &UnionPred) { 1903 1904 BasicBlock *LoopHeader = L->getHeader(); 1905 BasicBlock *Preheader = L->getLoopPreheader(); 1906 1907 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1908 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1909 // may be used by SCEVExpander. The blocks will be un-linked from their 1910 // predecessors and removed from LI & DT at the end of the function. 1911 if (!UnionPred.isAlwaysTrue()) { 1912 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1913 nullptr, "vector.scevcheck"); 1914 1915 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1916 &UnionPred, SCEVCheckBlock->getTerminator()); 1917 } 1918 1919 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1920 if (RtPtrChecking.Need) { 1921 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1922 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1923 "vector.memcheck"); 1924 1925 std::tie(std::ignore, MemRuntimeCheckCond) = 1926 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 1927 RtPtrChecking.getChecks(), MemCheckExp); 1928 assert(MemRuntimeCheckCond && 1929 "no RT checks generated although RtPtrChecking " 1930 "claimed checks are required"); 1931 } 1932 1933 if (!MemCheckBlock && !SCEVCheckBlock) 1934 return; 1935 1936 // Unhook the temporary block with the checks, update various places 1937 // accordingly. 1938 if (SCEVCheckBlock) 1939 SCEVCheckBlock->replaceAllUsesWith(Preheader); 1940 if (MemCheckBlock) 1941 MemCheckBlock->replaceAllUsesWith(Preheader); 1942 1943 if (SCEVCheckBlock) { 1944 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1945 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 1946 Preheader->getTerminator()->eraseFromParent(); 1947 } 1948 if (MemCheckBlock) { 1949 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1950 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 1951 Preheader->getTerminator()->eraseFromParent(); 1952 } 1953 1954 DT->changeImmediateDominator(LoopHeader, Preheader); 1955 if (MemCheckBlock) { 1956 DT->eraseNode(MemCheckBlock); 1957 LI->removeBlock(MemCheckBlock); 1958 } 1959 if (SCEVCheckBlock) { 1960 DT->eraseNode(SCEVCheckBlock); 1961 LI->removeBlock(SCEVCheckBlock); 1962 } 1963 } 1964 1965 /// Remove the created SCEV & memory runtime check blocks & instructions, if 1966 /// unused. 1967 ~GeneratedRTChecks() { 1968 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 1969 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 1970 if (!SCEVCheckCond) 1971 SCEVCleaner.markResultUsed(); 1972 1973 if (!MemRuntimeCheckCond) 1974 MemCheckCleaner.markResultUsed(); 1975 1976 if (MemRuntimeCheckCond) { 1977 auto &SE = *MemCheckExp.getSE(); 1978 // Memory runtime check generation creates compares that use expanded 1979 // values. Remove them before running the SCEVExpanderCleaners. 
1980 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 1981 if (MemCheckExp.isInsertedInstruction(&I)) 1982 continue; 1983 SE.forgetValue(&I); 1984 SE.eraseValueFromMap(&I); 1985 I.eraseFromParent(); 1986 } 1987 } 1988 MemCheckCleaner.cleanup(); 1989 SCEVCleaner.cleanup(); 1990 1991 if (SCEVCheckCond) 1992 SCEVCheckBlock->eraseFromParent(); 1993 if (MemRuntimeCheckCond) 1994 MemCheckBlock->eraseFromParent(); 1995 } 1996 1997 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 1998 /// adjusts the branches to branch to the vector preheader or \p Bypass, 1999 /// depending on the generated condition. 2000 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2001 BasicBlock *LoopVectorPreHeader, 2002 BasicBlock *LoopExitBlock) { 2003 if (!SCEVCheckCond) 2004 return nullptr; 2005 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2006 if (C->isZero()) 2007 return nullptr; 2008 2009 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2010 2011 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2012 // Create new preheader for vector loop. 2013 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2014 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2015 2016 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2017 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2018 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2019 SCEVCheckBlock); 2020 2021 DT->addNewBlock(SCEVCheckBlock, Pred); 2022 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2023 2024 ReplaceInstWithInst( 2025 SCEVCheckBlock->getTerminator(), 2026 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2027 // Mark the check as used, to prevent it from being removed during cleanup. 2028 SCEVCheckCond = nullptr; 2029 return SCEVCheckBlock; 2030 } 2031 2032 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2033 /// the branches to branch to the vector preheader or \p Bypass, depending on 2034 /// the generated condition. 2035 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2036 BasicBlock *LoopVectorPreHeader) { 2037 // Check if we generated code that checks in runtime if arrays overlap. 2038 if (!MemRuntimeCheckCond) 2039 return nullptr; 2040 2041 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2042 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2043 MemCheckBlock); 2044 2045 DT->addNewBlock(MemCheckBlock, Pred); 2046 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2047 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2048 2049 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2050 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2051 2052 ReplaceInstWithInst( 2053 MemCheckBlock->getTerminator(), 2054 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2055 MemCheckBlock->getTerminator()->setDebugLoc( 2056 Pred->getTerminator()->getDebugLoc()); 2057 2058 // Mark the check as used, to prevent it from being removed during cleanup. 2059 MemRuntimeCheckCond = nullptr; 2060 return MemCheckBlock; 2061 } 2062 }; 2063 2064 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2065 // vectorization. The loop needs to be annotated with #pragma omp simd 2066 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2067 // vector length information is not provided, vectorization is not considered 2068 // explicit. Interleave hints are not allowed either. These limitations will be 2069 // relaxed in the future. 
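// For example (illustrative only), an outer loop written as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
// carries the explicit vector-length information required here; the same
// loop without vectorize_width(#) is not treated as explicit vectorization.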
2070 // Please, note that we are currently forced to abuse the pragma 'clang 2071 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2072 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2073 // provides *explicit vectorization hints* (LV can bypass legal checks and 2074 // assume that vectorization is legal). However, both hints are implemented 2075 // using the same metadata (llvm.loop.vectorize, processed by 2076 // LoopVectorizeHints). This will be fixed in the future when the native IR 2077 // representation for pragma 'omp simd' is introduced. 2078 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2079 OptimizationRemarkEmitter *ORE) { 2080 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2081 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2082 2083 // Only outer loops with an explicit vectorization hint are supported. 2084 // Unannotated outer loops are ignored. 2085 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2086 return false; 2087 2088 Function *Fn = OuterLp->getHeader()->getParent(); 2089 if (!Hints.allowVectorization(Fn, OuterLp, 2090 true /*VectorizeOnlyWhenForced*/)) { 2091 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2092 return false; 2093 } 2094 2095 if (Hints.getInterleave() > 1) { 2096 // TODO: Interleave support is future work. 2097 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2098 "outer loops.\n"); 2099 Hints.emitRemarkWithHints(); 2100 return false; 2101 } 2102 2103 return true; 2104 } 2105 2106 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2107 OptimizationRemarkEmitter *ORE, 2108 SmallVectorImpl<Loop *> &V) { 2109 // Collect inner loops and outer loops without irreducible control flow. For 2110 // now, only collect outer loops that have explicit vectorization hints. If we 2111 // are stress testing the VPlan H-CFG construction, we collect the outermost 2112 // loop of every loop nest. 2113 if (L.isInnermost() || VPlanBuildStressTest || 2114 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2115 LoopBlocksRPO RPOT(&L); 2116 RPOT.perform(LI); 2117 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2118 V.push_back(&L); 2119 // TODO: Collect inner loops inside marked outer loops in case 2120 // vectorization fails for the outer loop. Do not invoke 2121 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2122 // already known to be reducible. We can use an inherited attribute for 2123 // that. 2124 return; 2125 } 2126 } 2127 for (Loop *InnerL : L) 2128 collectSupportedLoops(*InnerL, LI, ORE, V); 2129 } 2130 2131 namespace { 2132 2133 /// The LoopVectorize Pass. 
2134 struct LoopVectorize : public FunctionPass { 2135 /// Pass identification, replacement for typeid 2136 static char ID; 2137 2138 LoopVectorizePass Impl; 2139 2140 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2141 bool VectorizeOnlyWhenForced = false) 2142 : FunctionPass(ID), 2143 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2144 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2145 } 2146 2147 bool runOnFunction(Function &F) override { 2148 if (skipFunction(F)) 2149 return false; 2150 2151 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2152 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2153 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2154 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2155 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2156 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2157 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2158 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2159 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2160 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2161 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2162 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2163 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2164 2165 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2166 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2167 2168 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2169 GetLAA, *ORE, PSI).MadeAnyChange; 2170 } 2171 2172 void getAnalysisUsage(AnalysisUsage &AU) const override { 2173 AU.addRequired<AssumptionCacheTracker>(); 2174 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2175 AU.addRequired<DominatorTreeWrapperPass>(); 2176 AU.addRequired<LoopInfoWrapperPass>(); 2177 AU.addRequired<ScalarEvolutionWrapperPass>(); 2178 AU.addRequired<TargetTransformInfoWrapperPass>(); 2179 AU.addRequired<AAResultsWrapperPass>(); 2180 AU.addRequired<LoopAccessLegacyAnalysis>(); 2181 AU.addRequired<DemandedBitsWrapperPass>(); 2182 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2183 AU.addRequired<InjectTLIMappingsLegacy>(); 2184 2185 // We currently do not preserve loopinfo/dominator analyses with outer loop 2186 // vectorization. Until this is addressed, mark these analyses as preserved 2187 // only for non-VPlan-native path. 2188 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2189 if (!EnableVPlanNativePath) { 2190 AU.addPreserved<LoopInfoWrapperPass>(); 2191 AU.addPreserved<DominatorTreeWrapperPass>(); 2192 } 2193 2194 AU.addPreserved<BasicAAWrapperPass>(); 2195 AU.addPreserved<GlobalsAAWrapperPass>(); 2196 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2197 } 2198 }; 2199 2200 } // end anonymous namespace 2201 2202 //===----------------------------------------------------------------------===// 2203 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2204 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2205 //===----------------------------------------------------------------------===// 2206 2207 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2208 // We need to place the broadcast of invariant variables outside the loop, 2209 // but only if it's proven safe to do so. Else, broadcast will be inside 2210 // vector loop body. 
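// Illustrative output (hypothetical IR, VF = 4): broadcasting a scalar %x
// produces roughly
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer
// placed in the vector preheader when hoisting is safe, or in the vector
// loop body otherwise.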
2211 Instruction *Instr = dyn_cast<Instruction>(V); 2212 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2213 (!Instr || 2214 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2215 // Place the code for broadcasting invariant variables in the new preheader. 2216 IRBuilder<>::InsertPointGuard Guard(Builder); 2217 if (SafeToHoist) 2218 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2219 2220 // Broadcast the scalar into all locations in the vector. 2221 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2222 2223 return Shuf; 2224 } 2225 2226 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2227 const InductionDescriptor &II, Value *Step, Value *Start, 2228 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2229 VPTransformState &State) { 2230 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2231 "Expected either an induction phi-node or a truncate of it!"); 2232 2233 // Construct the initial value of the vector IV in the vector loop preheader 2234 auto CurrIP = Builder.saveIP(); 2235 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2236 if (isa<TruncInst>(EntryVal)) { 2237 assert(Start->getType()->isIntegerTy() && 2238 "Truncation requires an integer type"); 2239 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2240 Step = Builder.CreateTrunc(Step, TruncType); 2241 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2242 } 2243 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2244 Value *SteppedStart = 2245 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2246 2247 // We create vector phi nodes for both integer and floating-point induction 2248 // variables. Here, we determine the kind of arithmetic we will perform. 2249 Instruction::BinaryOps AddOp; 2250 Instruction::BinaryOps MulOp; 2251 if (Step->getType()->isIntegerTy()) { 2252 AddOp = Instruction::Add; 2253 MulOp = Instruction::Mul; 2254 } else { 2255 AddOp = II.getInductionOpcode(); 2256 MulOp = Instruction::FMul; 2257 } 2258 2259 // Multiply the vectorization factor by the step using integer or 2260 // floating-point arithmetic as appropriate. 2261 Type *StepType = Step->getType(); 2262 if (Step->getType()->isFloatingPointTy()) 2263 StepType = IntegerType::get(StepType->getContext(), 2264 StepType->getScalarSizeInBits()); 2265 Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2266 if (Step->getType()->isFloatingPointTy()) 2267 RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType()); 2268 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2269 2270 // Create a vector splat to use in the induction update. 2271 // 2272 // FIXME: If the step is non-constant, we create the vector splat with 2273 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2274 // handle a constant vector splat. 2275 Value *SplatVF = isa<Constant>(Mul) 2276 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2277 : Builder.CreateVectorSplat(VF, Mul); 2278 Builder.restoreIP(CurrIP); 2279 2280 // We may need to add the step a number of times, depending on the unroll 2281 // factor. The last of those goes into the PHI. 
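// Worked example (illustrative): with VF = 4, UF = 2 and an integer step S,
// the vector IV starts as <Start, Start+S, Start+2*S, Start+3*S>; each
// "step.add" below adds SplatVF = <4*S, 4*S, 4*S, 4*S> to form the next
// part, and the last such add feeds back into the phi as the value for the
// next vector iteration.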
2282 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2283 &*LoopVectorBody->getFirstInsertionPt()); 2284 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2285 Instruction *LastInduction = VecInd; 2286 for (unsigned Part = 0; Part < UF; ++Part) { 2287 State.set(Def, LastInduction, Part); 2288 2289 if (isa<TruncInst>(EntryVal)) 2290 addMetadata(LastInduction, EntryVal); 2291 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2292 State, Part); 2293 2294 LastInduction = cast<Instruction>( 2295 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2296 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2297 } 2298 2299 // Move the last step to the end of the latch block. This ensures consistent 2300 // placement of all induction updates. 2301 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2302 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2303 auto *ICmp = cast<Instruction>(Br->getCondition()); 2304 LastInduction->moveBefore(ICmp); 2305 LastInduction->setName("vec.ind.next"); 2306 2307 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2308 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2309 } 2310 2311 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2312 return Cost->isScalarAfterVectorization(I, VF) || 2313 Cost->isProfitableToScalarize(I, VF); 2314 } 2315 2316 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2317 if (shouldScalarizeInstruction(IV)) 2318 return true; 2319 auto isScalarInst = [&](User *U) -> bool { 2320 auto *I = cast<Instruction>(U); 2321 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2322 }; 2323 return llvm::any_of(IV->users(), isScalarInst); 2324 } 2325 2326 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2327 const InductionDescriptor &ID, const Instruction *EntryVal, 2328 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2329 unsigned Part, unsigned Lane) { 2330 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2331 "Expected either an induction phi-node or a truncate of it!"); 2332 2333 // This induction variable is not the phi from the original loop but the 2334 // newly-created IV based on the proof that casted Phi is equal to the 2335 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2336 // re-uses the same InductionDescriptor that original IV uses but we don't 2337 // have to do any recording in this case - that is done when original IV is 2338 // processed. 2339 if (isa<TruncInst>(EntryVal)) 2340 return; 2341 2342 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2343 if (Casts.empty()) 2344 return; 2345 // Only the first Cast instruction in the Casts vector is of interest. 2346 // The rest of the Casts (if exist) have no uses outside the 2347 // induction update chain itself. 
2348 if (Lane < UINT_MAX) 2349 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2350 else 2351 State.set(CastDef, VectorLoopVal, Part); 2352 } 2353 2354 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2355 TruncInst *Trunc, VPValue *Def, 2356 VPValue *CastDef, 2357 VPTransformState &State) { 2358 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2359 "Primary induction variable must have an integer type"); 2360 2361 auto II = Legal->getInductionVars().find(IV); 2362 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2363 2364 auto ID = II->second; 2365 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2366 2367 // The value from the original loop to which we are mapping the new induction 2368 // variable. 2369 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2370 2371 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2372 2373 // Generate code for the induction step. Note that induction steps are 2374 // required to be loop-invariant 2375 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2376 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2377 "Induction step should be loop invariant"); 2378 if (PSE.getSE()->isSCEVable(IV->getType())) { 2379 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2380 return Exp.expandCodeFor(Step, Step->getType(), 2381 LoopVectorPreHeader->getTerminator()); 2382 } 2383 return cast<SCEVUnknown>(Step)->getValue(); 2384 }; 2385 2386 // The scalar value to broadcast. This is derived from the canonical 2387 // induction variable. If a truncation type is given, truncate the canonical 2388 // induction variable and step. Otherwise, derive these values from the 2389 // induction descriptor. 2390 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2391 Value *ScalarIV = Induction; 2392 if (IV != OldInduction) { 2393 ScalarIV = IV->getType()->isIntegerTy() 2394 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2395 : Builder.CreateCast(Instruction::SIToFP, Induction, 2396 IV->getType()); 2397 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2398 ScalarIV->setName("offset.idx"); 2399 } 2400 if (Trunc) { 2401 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2402 assert(Step->getType()->isIntegerTy() && 2403 "Truncation requires an integer step"); 2404 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2405 Step = Builder.CreateTrunc(Step, TruncType); 2406 } 2407 return ScalarIV; 2408 }; 2409 2410 // Create the vector values from the scalar IV, in the absence of creating a 2411 // vector IV. 2412 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2413 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2414 for (unsigned Part = 0; Part < UF; ++Part) { 2415 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2416 Value *EntryPart = 2417 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2418 ID.getInductionOpcode()); 2419 State.set(Def, EntryPart, Part); 2420 if (Trunc) 2421 addMetadata(EntryPart, Trunc); 2422 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2423 State, Part); 2424 } 2425 }; 2426 2427 // Fast-math-flags propagate from the original induction instruction. 
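// Illustrative example (hypothetical source): for a floating-point
// induction such as
//   for (float f = F0; ...; f += 0.5f)
// the induction binop is the original fadd, and its fast-math flags are
// re-applied to the step computations created below.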
2428 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2429 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2430 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2431 2432 // Now do the actual transformations, and start with creating the step value. 2433 Value *Step = CreateStepValue(ID.getStep()); 2434 if (VF.isZero() || VF.isScalar()) { 2435 Value *ScalarIV = CreateScalarIV(Step); 2436 CreateSplatIV(ScalarIV, Step); 2437 return; 2438 } 2439 2440 // Determine if we want a scalar version of the induction variable. This is 2441 // true if the induction variable itself is not widened, or if it has at 2442 // least one user in the loop that is not widened. 2443 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2444 if (!NeedsScalarIV) { 2445 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2446 State); 2447 return; 2448 } 2449 2450 // Try to create a new independent vector induction variable. If we can't 2451 // create the phi node, we will splat the scalar induction variable in each 2452 // loop iteration. 2453 if (!shouldScalarizeInstruction(EntryVal)) { 2454 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2455 State); 2456 Value *ScalarIV = CreateScalarIV(Step); 2457 // Create scalar steps that can be used by instructions we will later 2458 // scalarize. Note that the addition of the scalar steps will not increase 2459 // the number of instructions in the loop in the common case prior to 2460 // InstCombine. We will be trading one vector extract for each scalar step. 2461 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2462 return; 2463 } 2464 2465 // All IV users are scalar instructions, so only emit a scalar IV, not a 2466 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2467 // predicate used by the masked loads/stores. 2468 Value *ScalarIV = CreateScalarIV(Step); 2469 if (!Cost->isScalarEpilogueAllowed()) 2470 CreateSplatIV(ScalarIV, Step); 2471 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2472 } 2473 2474 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2475 Instruction::BinaryOps BinOp) { 2476 // Create and check the types. 2477 auto *ValVTy = cast<VectorType>(Val->getType()); 2478 ElementCount VLen = ValVTy->getElementCount(); 2479 2480 Type *STy = Val->getType()->getScalarType(); 2481 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2482 "Induction Step must be an integer or FP"); 2483 assert(Step->getType() == STy && "Step has wrong type"); 2484 2485 SmallVector<Constant *, 8> Indices; 2486 2487 // Create a vector of consecutive numbers from zero to VF. 2488 VectorType *InitVecValVTy = ValVTy; 2489 Type *InitVecValSTy = STy; 2490 if (STy->isFloatingPointTy()) { 2491 InitVecValSTy = 2492 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2493 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2494 } 2495 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2496 2497 // Add on StartIdx 2498 Value *StartIdxSplat = Builder.CreateVectorSplat( 2499 VLen, ConstantInt::get(InitVecValSTy, StartIdx)); 2500 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2501 2502 if (STy->isIntegerTy()) { 2503 Step = Builder.CreateVectorSplat(VLen, Step); 2504 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2505 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2506 // which can be found from the original scalar operations. 
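// Worked example (illustrative, integer path): with Val = <x, x, x, x>,
// StartIdx = 0, Step = S and VF = 4:
//   InitVec = <0, 1, 2, 3>, splatted Step = <S, S, S, S>,
//   result  = <x, x + S, x + 2*S, x + 3*S>.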
2507 Step = Builder.CreateMul(InitVec, Step); 2508 return Builder.CreateAdd(Val, Step, "induction"); 2509 } 2510 2511 // Floating point induction. 2512 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2513 "Binary Opcode should be specified for FP induction"); 2514 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2515 Step = Builder.CreateVectorSplat(VLen, Step); 2516 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2517 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2518 } 2519 2520 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2521 Instruction *EntryVal, 2522 const InductionDescriptor &ID, 2523 VPValue *Def, VPValue *CastDef, 2524 VPTransformState &State) { 2525 // We shouldn't have to build scalar steps if we aren't vectorizing. 2526 assert(VF.isVector() && "VF should be greater than one"); 2527 // Get the value type and ensure it and the step have the same integer type. 2528 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2529 assert(ScalarIVTy == Step->getType() && 2530 "Val and Step should have the same type"); 2531 2532 // We build scalar steps for both integer and floating-point induction 2533 // variables. Here, we determine the kind of arithmetic we will perform. 2534 Instruction::BinaryOps AddOp; 2535 Instruction::BinaryOps MulOp; 2536 if (ScalarIVTy->isIntegerTy()) { 2537 AddOp = Instruction::Add; 2538 MulOp = Instruction::Mul; 2539 } else { 2540 AddOp = ID.getInductionOpcode(); 2541 MulOp = Instruction::FMul; 2542 } 2543 2544 // Determine the number of scalars we need to generate for each unroll 2545 // iteration. If EntryVal is uniform, we only need to generate the first 2546 // lane. Otherwise, we generate all VF values. 2547 bool IsUniform = 2548 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2549 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2550 // Compute the scalar steps and save the results in State. 2551 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2552 ScalarIVTy->getScalarSizeInBits()); 2553 Type *VecIVTy = nullptr; 2554 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2555 if (!IsUniform && VF.isScalable()) { 2556 VecIVTy = VectorType::get(ScalarIVTy, VF); 2557 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2558 SplatStep = Builder.CreateVectorSplat(VF, Step); 2559 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2560 } 2561 2562 for (unsigned Part = 0; Part < UF; ++Part) { 2563 Value *StartIdx0 = 2564 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2565 2566 if (!IsUniform && VF.isScalable()) { 2567 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2568 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2569 if (ScalarIVTy->isFloatingPointTy()) 2570 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2571 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2572 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2573 State.set(Def, Add, Part); 2574 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2575 Part); 2576 // It's useful to record the lane values too for the known minimum number 2577 // of elements so we do those below. This improves the code quality when 2578 // trying to extract the first element, for example. 
2579 } 2580 2581 if (ScalarIVTy->isFloatingPointTy()) 2582 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2583 2584 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2585 Value *StartIdx = Builder.CreateBinOp( 2586 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2587 // The step returned by `createStepForVF` is a runtime-evaluated value 2588 // when VF is scalable. Otherwise, it should be folded into a Constant. 2589 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2590 "Expected StartIdx to be folded to a constant when VF is not " 2591 "scalable"); 2592 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2593 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2594 State.set(Def, Add, VPIteration(Part, Lane)); 2595 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2596 Part, Lane); 2597 } 2598 } 2599 } 2600 2601 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2602 const VPIteration &Instance, 2603 VPTransformState &State) { 2604 Value *ScalarInst = State.get(Def, Instance); 2605 Value *VectorValue = State.get(Def, Instance.Part); 2606 VectorValue = Builder.CreateInsertElement( 2607 VectorValue, ScalarInst, 2608 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2609 State.set(Def, VectorValue, Instance.Part); 2610 } 2611 2612 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2613 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2614 return Builder.CreateVectorReverse(Vec, "reverse"); 2615 } 2616 2617 // Return whether we allow using masked interleave-groups (for dealing with 2618 // strided loads/stores that reside in predicated blocks, or for dealing 2619 // with gaps). 2620 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2621 // If an override option has been passed in for interleaved accesses, use it. 2622 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2623 return EnableMaskedInterleavedMemAccesses; 2624 2625 return TTI.enableMaskedInterleavedAccessVectorization(); 2626 } 2627 2628 // Try to vectorize the interleave group that \p Instr belongs to. 2629 // 2630 // E.g. Translate following interleaved load group (factor = 3): 2631 // for (i = 0; i < N; i+=3) { 2632 // R = Pic[i]; // Member of index 0 2633 // G = Pic[i+1]; // Member of index 1 2634 // B = Pic[i+2]; // Member of index 2 2635 // ... // do something to R, G, B 2636 // } 2637 // To: 2638 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2639 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2640 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2641 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2642 // 2643 // Or translate following interleaved store group (factor = 3): 2644 // for (i = 0; i < N; i+=3) { 2645 // ... 
do something to R, G, B 2646 // Pic[i] = R; // Member of index 0 2647 // Pic[i+1] = G; // Member of index 1 2648 // Pic[i+2] = B; // Member of index 2 2649 // } 2650 // To: 2651 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2652 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2653 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2654 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2655 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2656 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2657 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2658 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2659 VPValue *BlockInMask) { 2660 Instruction *Instr = Group->getInsertPos(); 2661 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2662 2663 // Prepare for the vector type of the interleaved load/store. 2664 Type *ScalarTy = getMemInstValueType(Instr); 2665 unsigned InterleaveFactor = Group->getFactor(); 2666 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2667 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2668 2669 // Prepare for the new pointers. 2670 SmallVector<Value *, 2> AddrParts; 2671 unsigned Index = Group->getIndex(Instr); 2672 2673 // TODO: extend the masked interleaved-group support to reversed access. 2674 assert((!BlockInMask || !Group->isReverse()) && 2675 "Reversed masked interleave-group not supported."); 2676 2677 // If the group is reverse, adjust the index to refer to the last vector lane 2678 // instead of the first. We adjust the index from the first vector lane, 2679 // rather than directly getting the pointer for lane VF - 1, because the 2680 // pointer operand of the interleaved access is supposed to be uniform. For 2681 // uniform instructions, we're only required to generate a value for the 2682 // first vector lane in each unroll iteration. 2683 if (Group->isReverse()) 2684 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2685 2686 for (unsigned Part = 0; Part < UF; Part++) { 2687 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2688 setDebugLocFromInst(Builder, AddrPart); 2689 2690 // Notice current instruction could be any index. Need to adjust the address 2691 // to the member of index 0. 2692 // 2693 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2694 // b = A[i]; // Member of index 0 2695 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2696 // 2697 // E.g. A[i+1] = a; // Member of index 1 2698 // A[i] = b; // Member of index 0 2699 // A[i+2] = c; // Member of index 2 (Current instruction) 2700 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2701 2702 bool InBounds = false; 2703 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2704 InBounds = gep->isInBounds(); 2705 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2706 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2707 2708 // Cast to the vector pointer type. 
2709 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2710 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2711 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2712 } 2713 2714 setDebugLocFromInst(Builder, Instr); 2715 Value *PoisonVec = PoisonValue::get(VecTy); 2716 2717 Value *MaskForGaps = nullptr; 2718 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2719 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2720 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2721 } 2722 2723 // Vectorize the interleaved load group. 2724 if (isa<LoadInst>(Instr)) { 2725 // For each unroll part, create a wide load for the group. 2726 SmallVector<Value *, 2> NewLoads; 2727 for (unsigned Part = 0; Part < UF; Part++) { 2728 Instruction *NewLoad; 2729 if (BlockInMask || MaskForGaps) { 2730 assert(useMaskedInterleavedAccesses(*TTI) && 2731 "masked interleaved groups are not allowed."); 2732 Value *GroupMask = MaskForGaps; 2733 if (BlockInMask) { 2734 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2735 Value *ShuffledMask = Builder.CreateShuffleVector( 2736 BlockInMaskPart, 2737 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2738 "interleaved.mask"); 2739 GroupMask = MaskForGaps 2740 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2741 MaskForGaps) 2742 : ShuffledMask; 2743 } 2744 NewLoad = 2745 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2746 GroupMask, PoisonVec, "wide.masked.vec"); 2747 } 2748 else 2749 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2750 Group->getAlign(), "wide.vec"); 2751 Group->addMetadata(NewLoad); 2752 NewLoads.push_back(NewLoad); 2753 } 2754 2755 // For each member in the group, shuffle out the appropriate data from the 2756 // wide loads. 2757 unsigned J = 0; 2758 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2759 Instruction *Member = Group->getMember(I); 2760 2761 // Skip the gaps in the group. 2762 if (!Member) 2763 continue; 2764 2765 auto StrideMask = 2766 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2767 for (unsigned Part = 0; Part < UF; Part++) { 2768 Value *StridedVec = Builder.CreateShuffleVector( 2769 NewLoads[Part], StrideMask, "strided.vec"); 2770 2771 // If this member has different type, cast the result type. 2772 if (Member->getType() != ScalarTy) { 2773 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2774 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2775 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2776 } 2777 2778 if (Group->isReverse()) 2779 StridedVec = reverseVector(StridedVec); 2780 2781 State.set(VPDefs[J], StridedVec, Part); 2782 } 2783 ++J; 2784 } 2785 return; 2786 } 2787 2788 // The sub vector type for current instruction. 2789 auto *SubVT = VectorType::get(ScalarTy, VF); 2790 2791 // Vectorize the interleaved store group. 2792 for (unsigned Part = 0; Part < UF; Part++) { 2793 // Collect the stored vector from each member. 2794 SmallVector<Value *, 4> StoredVecs; 2795 for (unsigned i = 0; i < InterleaveFactor; i++) { 2796 // Interleaved store group doesn't allow a gap, so each index has a member 2797 assert(Group->getMember(i) && "Fail to get a member from an interleaved store group"); 2798 2799 Value *StoredVec = State.get(StoredValues[i], Part); 2800 2801 if (Group->isReverse()) 2802 StoredVec = reverseVector(StoredVec); 2803 2804 // If this member has different type, cast it to a unified type. 
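// Hypothetical example: if one member of the store group yields <4 x float>
// while the group's unified element type is i32 (SubVT = <4 x i32>), the
// stored value is bit-or-pointer cast to <4 x i32> so that all members can be
// concatenated into one wide vector below.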
2805 2806 if (StoredVec->getType() != SubVT) 2807 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2808 2809 StoredVecs.push_back(StoredVec); 2810 } 2811 2812 // Concatenate all vectors into a wide vector. 2813 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2814 2815 // Interleave the elements in the wide vector. 2816 Value *IVec = Builder.CreateShuffleVector( 2817 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2818 "interleaved.vec"); 2819 2820 Instruction *NewStoreInstr; 2821 if (BlockInMask) { 2822 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2823 Value *ShuffledMask = Builder.CreateShuffleVector( 2824 BlockInMaskPart, 2825 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2826 "interleaved.mask"); 2827 NewStoreInstr = Builder.CreateMaskedStore( 2828 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2829 } 2830 else 2831 NewStoreInstr = 2832 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2833 2834 Group->addMetadata(NewStoreInstr); 2835 } 2836 } 2837 2838 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2839 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2840 VPValue *StoredValue, VPValue *BlockInMask) { 2841 // Attempt to issue a wide load. 2842 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2843 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2844 2845 assert((LI || SI) && "Invalid Load/Store instruction"); 2846 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2847 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2848 2849 LoopVectorizationCostModel::InstWidening Decision = 2850 Cost->getWideningDecision(Instr, VF); 2851 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2852 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2853 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2854 "CM decision is not to widen the memory instruction"); 2855 2856 Type *ScalarDataTy = getMemInstValueType(Instr); 2857 2858 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2859 const Align Alignment = getLoadStoreAlignment(Instr); 2860 2861 // Determine if the pointer operand of the access is either consecutive or 2862 // reverse consecutive. 2863 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2864 bool ConsecutiveStride = 2865 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2866 bool CreateGatherScatter = 2867 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2868 2869 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2870 // gather/scatter. Otherwise Decision should have been to Scalarize. 2871 assert((ConsecutiveStride || CreateGatherScatter) && 2872 "The instruction should be scalarized"); 2873 (void)ConsecutiveStride; 2874 2875 VectorParts BlockInMaskParts(UF); 2876 bool isMaskRequired = BlockInMask; 2877 if (isMaskRequired) 2878 for (unsigned Part = 0; Part < UF; ++Part) 2879 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2880 2881 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2882 // Calculate the pointer for the specific unroll-part. 2883 GetElementPtrInst *PartPtr = nullptr; 2884 2885 bool InBounds = false; 2886 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2887 InBounds = gep->isInBounds(); 2888 if (Reverse) { 2889 // If the address is consecutive but reversed, then the 2890 // wide store needs to start at the last vector element. 
2891 // RunTimeVF = VScale * VF.getKnownMinValue() 2892 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2893 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2894 // NumElt = -Part * RunTimeVF 2895 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2896 // LastLane = 1 - RunTimeVF 2897 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2898 PartPtr = 2899 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2900 PartPtr->setIsInBounds(InBounds); 2901 PartPtr = cast<GetElementPtrInst>( 2902 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2903 PartPtr->setIsInBounds(InBounds); 2904 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2905 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2906 } else { 2907 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2908 PartPtr = cast<GetElementPtrInst>( 2909 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2910 PartPtr->setIsInBounds(InBounds); 2911 } 2912 2913 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2914 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2915 }; 2916 2917 // Handle Stores: 2918 if (SI) { 2919 setDebugLocFromInst(Builder, SI); 2920 2921 for (unsigned Part = 0; Part < UF; ++Part) { 2922 Instruction *NewSI = nullptr; 2923 Value *StoredVal = State.get(StoredValue, Part); 2924 if (CreateGatherScatter) { 2925 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2926 Value *VectorGep = State.get(Addr, Part); 2927 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2928 MaskPart); 2929 } else { 2930 if (Reverse) { 2931 // If we store to reverse consecutive memory locations, then we need 2932 // to reverse the order of elements in the stored value. 2933 StoredVal = reverseVector(StoredVal); 2934 // We don't want to update the value in the map as it might be used in 2935 // another expression. So don't call resetVectorValue(StoredVal). 2936 } 2937 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2938 if (isMaskRequired) 2939 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2940 BlockInMaskParts[Part]); 2941 else 2942 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2943 } 2944 addMetadata(NewSI, SI); 2945 } 2946 return; 2947 } 2948 2949 // Handle loads. 2950 assert(LI && "Must have a load instruction"); 2951 setDebugLocFromInst(Builder, LI); 2952 for (unsigned Part = 0; Part < UF; ++Part) { 2953 Value *NewLI; 2954 if (CreateGatherScatter) { 2955 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2956 Value *VectorGep = State.get(Addr, Part); 2957 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2958 nullptr, "wide.masked.gather"); 2959 addMetadata(NewLI, LI); 2960 } else { 2961 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2962 if (isMaskRequired) 2963 NewLI = Builder.CreateMaskedLoad( 2964 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), 2965 "wide.masked.load"); 2966 else 2967 NewLI = 2968 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2969 2970 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
2971 addMetadata(NewLI, LI); 2972 if (Reverse) 2973 NewLI = reverseVector(NewLI); 2974 } 2975 2976 State.set(Def, NewLI, Part); 2977 } 2978 } 2979 2980 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, 2981 VPUser &User, 2982 const VPIteration &Instance, 2983 bool IfPredicateInstr, 2984 VPTransformState &State) { 2985 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2986 2987 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2988 // the first lane and part. 2989 if (isa<NoAliasScopeDeclInst>(Instr)) 2990 if (!Instance.isFirstIteration()) 2991 return; 2992 2993 setDebugLocFromInst(Builder, Instr); 2994 2995 // Does this instruction return a value ? 2996 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2997 2998 Instruction *Cloned = Instr->clone(); 2999 if (!IsVoidRetTy) 3000 Cloned->setName(Instr->getName() + ".cloned"); 3001 3002 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3003 Builder.GetInsertPoint()); 3004 // Replace the operands of the cloned instructions with their scalar 3005 // equivalents in the new loop. 3006 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 3007 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 3008 auto InputInstance = Instance; 3009 if (!Operand || !OrigLoop->contains(Operand) || 3010 (Cost->isUniformAfterVectorization(Operand, State.VF))) 3011 InputInstance.Lane = VPLane::getFirstLane(); 3012 auto *NewOp = State.get(User.getOperand(op), InputInstance); 3013 Cloned->setOperand(op, NewOp); 3014 } 3015 addNewMetadata(Cloned, Instr); 3016 3017 // Place the cloned scalar in the new loop. 3018 Builder.Insert(Cloned); 3019 3020 State.set(Def, Cloned, Instance); 3021 3022 // If we just cloned a new assumption, add it the assumption cache. 3023 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3024 AC->registerAssumption(II); 3025 3026 // End if-block. 3027 if (IfPredicateInstr) 3028 PredicatedInstructions.push_back(Cloned); 3029 } 3030 3031 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3032 Value *End, Value *Step, 3033 Instruction *DL) { 3034 BasicBlock *Header = L->getHeader(); 3035 BasicBlock *Latch = L->getLoopLatch(); 3036 // As we're just creating this loop, it's possible no latch exists 3037 // yet. If so, use the header as this will be a single block loop. 3038 if (!Latch) 3039 Latch = Header; 3040 3041 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 3042 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3043 setDebugLocFromInst(Builder, OldInst); 3044 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 3045 3046 Builder.SetInsertPoint(Latch->getTerminator()); 3047 setDebugLocFromInst(Builder, OldInst); 3048 3049 // Create i+1 and fill the PHINode. 3050 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 3051 Induction->addIncoming(Start, L->getLoopPreheader()); 3052 Induction->addIncoming(Next, Latch); 3053 // Create the compare. 3054 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3055 Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3056 3057 // Now we have two terminators. Remove the old one from the block. 
3058 Latch->getTerminator()->eraseFromParent(); 3059 3060 return Induction; 3061 } 3062 3063 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3064 if (TripCount) 3065 return TripCount; 3066 3067 assert(L && "Create Trip Count for null loop."); 3068 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3069 // Find the loop boundaries. 3070 ScalarEvolution *SE = PSE.getSE(); 3071 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3072 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3073 "Invalid loop count"); 3074 3075 Type *IdxTy = Legal->getWidestInductionType(); 3076 assert(IdxTy && "No type for induction"); 3077 3078 // The exit count might have the type of i64 while the phi is i32. This can 3079 // happen if we have an induction variable that is sign extended before the 3080 // compare. The only way that we get a backedge taken count is that the 3081 // induction variable was signed and as such will not overflow. In such a case 3082 // truncation is legal. 3083 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3084 IdxTy->getPrimitiveSizeInBits()) 3085 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3086 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3087 3088 // Get the total trip count from the count by adding 1. 3089 const SCEV *ExitCount = SE->getAddExpr( 3090 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3091 3092 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3093 3094 // Expand the trip count and place the new instructions in the preheader. 3095 // Notice that the pre-header does not change, only the loop body. 3096 SCEVExpander Exp(*SE, DL, "induction"); 3097 3098 // Count holds the overall loop count (N). 3099 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3100 L->getLoopPreheader()->getTerminator()); 3101 3102 if (TripCount->getType()->isPointerTy()) 3103 TripCount = 3104 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3105 L->getLoopPreheader()->getTerminator()); 3106 3107 return TripCount; 3108 } 3109 3110 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3111 if (VectorTripCount) 3112 return VectorTripCount; 3113 3114 Value *TC = getOrCreateTripCount(L); 3115 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3116 3117 Type *Ty = TC->getType(); 3118 // This is where we can make the step a runtime constant. 3119 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3120 3121 // If the tail is to be folded by masking, round the number of iterations N 3122 // up to a multiple of Step instead of rounding down. This is done by first 3123 // adding Step-1 and then rounding down. Note that it's ok if this addition 3124 // overflows: the vector induction variable will eventually wrap to zero given 3125 // that it starts at zero and its Step is a power of two; the loop will then 3126 // exit, with the last early-exit vector comparison also producing all-true. 3127 if (Cost->foldTailByMasking()) { 3128 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3129 "VF*UF must be a power of 2 when folding tail by masking"); 3130 assert(!VF.isScalable() && 3131 "Tail folding not yet supported for scalable vectors"); 3132 TC = Builder.CreateAdd( 3133 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3134 } 3135 3136 // Now we need to generate the expression for the part of the loop that the 3137 // vectorized body will execute. 
This is equal to N - (N % Step) if scalar 3138 // iterations are not required for correctness, or N - Step, otherwise. Step 3139 // is equal to the vectorization factor (number of SIMD elements) times the 3140 // unroll factor (number of SIMD instructions). 3141 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3142 3143 // There are two cases where we need to ensure (at least) the last iteration 3144 // runs in the scalar remainder loop. Thus, if the step evenly divides 3145 // the trip count, we set the remainder to be equal to the step. If the step 3146 // does not evenly divide the trip count, no adjustment is necessary since 3147 // there will already be scalar iterations. Note that the minimum iterations 3148 // check ensures that N >= Step. The cases are: 3149 // 1) If there is a non-reversed interleaved group that may speculatively 3150 // access memory out-of-bounds. 3151 // 2) If any instruction may follow a conditionally taken exit. That is, if 3152 // the loop contains multiple exiting blocks, or a single exiting block 3153 // which is not the latch. 3154 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 3155 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3156 R = Builder.CreateSelect(IsZero, Step, R); 3157 } 3158 3159 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3160 3161 return VectorTripCount; 3162 } 3163 3164 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3165 const DataLayout &DL) { 3166 // Verify that V is a vector type with same number of elements as DstVTy. 3167 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3168 unsigned VF = DstFVTy->getNumElements(); 3169 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3170 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3171 Type *SrcElemTy = SrcVecTy->getElementType(); 3172 Type *DstElemTy = DstFVTy->getElementType(); 3173 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3174 "Vector elements must have same size"); 3175 3176 // Do a direct cast if element types are castable. 3177 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3178 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3179 } 3180 // V cannot be directly casted to desired vector type. 3181 // May happen when V is a floating point vector but DstVTy is a vector of 3182 // pointers or vice-versa. Handle this using a two-step bitcast using an 3183 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3184 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3185 "Only one type should be a pointer type"); 3186 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3187 "Only one type should be a floating point type"); 3188 Type *IntTy = 3189 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3190 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3191 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3192 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3193 } 3194 3195 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3196 BasicBlock *Bypass) { 3197 Value *Count = getOrCreateTripCount(L); 3198 // Reuse existing vector loop preheader for TC checks. 3199 // Note that new preheader block is generated for vector loop. 
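// Worked example (numbers assumed for illustration): with VF = 4 and UF = 2
// the step is 8, so a trip count of 7 fails the ult check below and branches
// to the scalar loop; when a scalar epilogue is required the predicate is ule,
// so a trip count of exactly 8 also bypasses the vector loop.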
3200 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3201 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3202 3203 // Generate code to check if the loop's trip count is less than VF * UF, or 3204 // equal to it in case a scalar epilogue is required; this implies that the 3205 // vector trip count is zero. This check also covers the case where adding one 3206 // to the backedge-taken count overflowed leading to an incorrect trip count 3207 // of zero. In this case we will also jump to the scalar loop. 3208 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3209 : ICmpInst::ICMP_ULT; 3210 3211 // If tail is to be folded, vector loop takes care of all iterations. 3212 Value *CheckMinIters = Builder.getFalse(); 3213 if (!Cost->foldTailByMasking()) { 3214 Value *Step = 3215 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3216 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3217 } 3218 // Create new preheader for vector loop. 3219 LoopVectorPreHeader = 3220 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3221 "vector.ph"); 3222 3223 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3224 DT->getNode(Bypass)->getIDom()) && 3225 "TC check is expected to dominate Bypass"); 3226 3227 // Update dominator for Bypass & LoopExit. 3228 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3229 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3230 3231 ReplaceInstWithInst( 3232 TCCheckBlock->getTerminator(), 3233 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3234 LoopBypassBlocks.push_back(TCCheckBlock); 3235 } 3236 3237 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3238 3239 BasicBlock *const SCEVCheckBlock = 3240 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3241 if (!SCEVCheckBlock) 3242 return nullptr; 3243 3244 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3245 (OptForSizeBasedOnProfile && 3246 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3247 "Cannot SCEV check stride or overflow when optimizing for size"); 3248 3249 3250 // Update dominator only if this is first RT check. 3251 if (LoopBypassBlocks.empty()) { 3252 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3253 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3254 } 3255 3256 LoopBypassBlocks.push_back(SCEVCheckBlock); 3257 AddedSafetyChecks = true; 3258 return SCEVCheckBlock; 3259 } 3260 3261 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3262 BasicBlock *Bypass) { 3263 // VPlan-native path does not do any analysis for runtime checks currently. 3264 if (EnableVPlanNativePath) 3265 return nullptr; 3266 3267 BasicBlock *const MemCheckBlock = 3268 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3269 3270 // Check if we generated code that checks in runtime if arrays overlap. We put 3271 // the checks into a separate block to make the more common case of few 3272 // elements faster. 
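// Illustrative example (loop assumed, not from this excerpt): for
//   for (i = 0; i < n; ++i) a[i] = b[i] + 1;
// the emitted block checks whether the ranges accessed through 'a' and 'b'
// overlap and branches to the scalar loop if they do, so the vector body may
// assume the accesses do not alias.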
3273 if (!MemCheckBlock) 3274 return nullptr; 3275 3276 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3277 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3278 "Cannot emit memory checks when optimizing for size, unless forced " 3279 "to vectorize."); 3280 ORE->emit([&]() { 3281 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3282 L->getStartLoc(), L->getHeader()) 3283 << "Code-size may be reduced by not forcing " 3284 "vectorization, or by source-code modifications " 3285 "eliminating the need for runtime checks " 3286 "(e.g., adding 'restrict')."; 3287 }); 3288 } 3289 3290 LoopBypassBlocks.push_back(MemCheckBlock); 3291 3292 AddedSafetyChecks = true; 3293 3294 // We currently don't use LoopVersioning for the actual loop cloning but we 3295 // still use it to add the noalias metadata. 3296 LVer = std::make_unique<LoopVersioning>( 3297 *Legal->getLAI(), 3298 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3299 DT, PSE.getSE()); 3300 LVer->prepareNoAliasMetadata(); 3301 return MemCheckBlock; 3302 } 3303 3304 Value *InnerLoopVectorizer::emitTransformedIndex( 3305 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3306 const InductionDescriptor &ID) const { 3307 3308 SCEVExpander Exp(*SE, DL, "induction"); 3309 auto Step = ID.getStep(); 3310 auto StartValue = ID.getStartValue(); 3311 assert(Index->getType() == Step->getType() && 3312 "Index type does not match StepValue type"); 3313 3314 // Note: the IR at this point is broken. We cannot use SE to create any new 3315 // SCEV and then expand it, hoping that SCEV's simplification will give us 3316 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3317 // lead to various SCEV crashes. So all we can do is to use builder and rely 3318 // on InstCombine for future simplifications. Here we handle some trivial 3319 // cases only. 3320 auto CreateAdd = [&B](Value *X, Value *Y) { 3321 assert(X->getType() == Y->getType() && "Types don't match!"); 3322 if (auto *CX = dyn_cast<ConstantInt>(X)) 3323 if (CX->isZero()) 3324 return Y; 3325 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3326 if (CY->isZero()) 3327 return X; 3328 return B.CreateAdd(X, Y); 3329 }; 3330 3331 auto CreateMul = [&B](Value *X, Value *Y) { 3332 assert(X->getType() == Y->getType() && "Types don't match!"); 3333 if (auto *CX = dyn_cast<ConstantInt>(X)) 3334 if (CX->isOne()) 3335 return Y; 3336 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3337 if (CY->isOne()) 3338 return X; 3339 return B.CreateMul(X, Y); 3340 }; 3341 3342 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3343 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3344 // the DomTree is not kept up-to-date for additional blocks generated in the 3345 // vector loop. By using the header as insertion point, we guarantee that the 3346 // expanded instructions dominate all their uses. 
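// Rough sketch of what emitTransformedIndex produces (example values
// assumed): for an integer induction with StartValue = 7 and Step = 3, an
// Index of 5 becomes 7 + 5 * 3 = 22; for a pointer induction the same product
// is used as a GEP offset, and for an FP induction fadd/fsub is used instead.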
3347 auto GetInsertPoint = [this, &B]() { 3348 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3349 if (InsertBB != LoopVectorBody && 3350 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3351 return LoopVectorBody->getTerminator(); 3352 return &*B.GetInsertPoint(); 3353 }; 3354 3355 switch (ID.getKind()) { 3356 case InductionDescriptor::IK_IntInduction: { 3357 assert(Index->getType() == StartValue->getType() && 3358 "Index type does not match StartValue type"); 3359 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3360 return B.CreateSub(StartValue, Index); 3361 auto *Offset = CreateMul( 3362 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3363 return CreateAdd(StartValue, Offset); 3364 } 3365 case InductionDescriptor::IK_PtrInduction: { 3366 assert(isa<SCEVConstant>(Step) && 3367 "Expected constant step for pointer induction"); 3368 return B.CreateGEP( 3369 StartValue->getType()->getPointerElementType(), StartValue, 3370 CreateMul(Index, 3371 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()))); 3372 } 3373 case InductionDescriptor::IK_FpInduction: { 3374 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3375 auto InductionBinOp = ID.getInductionBinOp(); 3376 assert(InductionBinOp && 3377 (InductionBinOp->getOpcode() == Instruction::FAdd || 3378 InductionBinOp->getOpcode() == Instruction::FSub) && 3379 "Original bin op should be defined for FP induction"); 3380 3381 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3382 Value *MulExp = B.CreateFMul(StepValue, Index); 3383 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3384 "induction"); 3385 } 3386 case InductionDescriptor::IK_NoInduction: 3387 return nullptr; 3388 } 3389 llvm_unreachable("invalid enum"); 3390 } 3391 3392 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3393 LoopScalarBody = OrigLoop->getHeader(); 3394 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3395 LoopExitBlock = OrigLoop->getUniqueExitBlock(); 3396 assert(LoopExitBlock && "Must have an exit block"); 3397 assert(LoopVectorPreHeader && "Invalid loop structure"); 3398 3399 LoopMiddleBlock = 3400 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3401 LI, nullptr, Twine(Prefix) + "middle.block"); 3402 LoopScalarPreHeader = 3403 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3404 nullptr, Twine(Prefix) + "scalar.ph"); 3405 3406 // Set up branch from middle block to the exit and scalar preheader blocks. 3407 // completeLoopSkeleton will update the condition to use an iteration check, 3408 // if required to decide whether to execute the remainder. 3409 BranchInst *BrInst = 3410 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue()); 3411 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3412 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3413 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3414 3415 // We intentionally don't let SplitBlock to update LoopInfo since 3416 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3417 // LoopVectorBody is explicitly added to the correct place few lines later. 3418 LoopVectorBody = 3419 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3420 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3421 3422 // Update dominator for loop exit. 
3423 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3424 3425 // Create and register the new vector loop. 3426 Loop *Lp = LI->AllocateLoop(); 3427 Loop *ParentLoop = OrigLoop->getParentLoop(); 3428 3429 // Insert the new loop into the loop nest and register the new basic blocks 3430 // before calling any utilities such as SCEV that require valid LoopInfo. 3431 if (ParentLoop) { 3432 ParentLoop->addChildLoop(Lp); 3433 } else { 3434 LI->addTopLevelLoop(Lp); 3435 } 3436 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3437 return Lp; 3438 } 3439 3440 void InnerLoopVectorizer::createInductionResumeValues( 3441 Loop *L, Value *VectorTripCount, 3442 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3443 assert(VectorTripCount && L && "Expected valid arguments"); 3444 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3445 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3446 "Inconsistent information about additional bypass."); 3447 // We are going to resume the execution of the scalar loop. 3448 // Go over all of the induction variables that we found and fix the 3449 // PHIs that are left in the scalar version of the loop. 3450 // The starting values of PHI nodes depend on the counter of the last 3451 // iteration in the vectorized loop. 3452 // If we come from a bypass edge then we need to start from the original 3453 // start value. 3454 for (auto &InductionEntry : Legal->getInductionVars()) { 3455 PHINode *OrigPhi = InductionEntry.first; 3456 InductionDescriptor II = InductionEntry.second; 3457 3458 // Create phi nodes to merge from the backedge-taken check block. 3459 PHINode *BCResumeVal = 3460 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3461 LoopScalarPreHeader->getTerminator()); 3462 // Copy original phi DL over to the new one. 3463 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3464 Value *&EndValue = IVEndValues[OrigPhi]; 3465 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3466 if (OrigPhi == OldInduction) { 3467 // We know what the end value is. 3468 EndValue = VectorTripCount; 3469 } else { 3470 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3471 3472 // Fast-math-flags propagate from the original induction instruction. 3473 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3474 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3475 3476 Type *StepType = II.getStep()->getType(); 3477 Instruction::CastOps CastOp = 3478 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3479 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3480 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3481 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3482 EndValue->setName("ind.end"); 3483 3484 // Compute the end value for the additional bypass (if applicable). 3485 if (AdditionalBypass.first) { 3486 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3487 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3488 StepType, true); 3489 CRD = 3490 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3491 EndValueFromAdditionalBypass = 3492 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3493 EndValueFromAdditionalBypass->setName("ind.end"); 3494 } 3495 } 3496 // The new PHI merges the original incoming value, in case of a bypass, 3497 // or the value at the end of the vectorized loop. 
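// Illustrative values (assumed): for a loop counting 0..n vectorized with
// VF * UF = 8, bc.resume.val is the vector trip count n - (n % 8) when the
// scalar loop is entered from the middle block, and the original start value
// 0 when it is entered from one of the bypass blocks.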
3498 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3499 3500 // Fix the scalar body counter (PHI node). 3501 // The old induction's phi node in the scalar body needs the truncated 3502 // value. 3503 for (BasicBlock *BB : LoopBypassBlocks) 3504 BCResumeVal->addIncoming(II.getStartValue(), BB); 3505 3506 if (AdditionalBypass.first) 3507 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3508 EndValueFromAdditionalBypass); 3509 3510 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3511 } 3512 } 3513 3514 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3515 MDNode *OrigLoopID) { 3516 assert(L && "Expected valid loop."); 3517 3518 // The trip counts should be cached by now. 3519 Value *Count = getOrCreateTripCount(L); 3520 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3521 3522 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3523 3524 // Add a check in the middle block to see if we have completed 3525 // all of the iterations in the first vector loop. 3526 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3527 // If tail is to be folded, we know we don't need to run the remainder. 3528 if (!Cost->foldTailByMasking()) { 3529 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3530 Count, VectorTripCount, "cmp.n", 3531 LoopMiddleBlock->getTerminator()); 3532 3533 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3534 // of the corresponding compare because they may have ended up with 3535 // different line numbers and we want to avoid awkward line stepping while 3536 // debugging. Eg. if the compare has got a line number inside the loop. 3537 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3538 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3539 } 3540 3541 // Get ready to start creating new instructions into the vectorized body. 3542 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3543 "Inconsistent vector loop preheader"); 3544 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3545 3546 Optional<MDNode *> VectorizedLoopID = 3547 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3548 LLVMLoopVectorizeFollowupVectorized}); 3549 if (VectorizedLoopID.hasValue()) { 3550 L->setLoopID(VectorizedLoopID.getValue()); 3551 3552 // Do not setAlreadyVectorized if loop attributes have been defined 3553 // explicitly. 3554 return LoopVectorPreHeader; 3555 } 3556 3557 // Keep all loop hints from the original loop on the vector loop (we'll 3558 // replace the vectorizer-specific hints below). 3559 if (MDNode *LID = OrigLoop->getLoopID()) 3560 L->setLoopID(LID); 3561 3562 LoopVectorizeHints Hints(L, true, *ORE); 3563 Hints.setAlreadyVectorized(); 3564 3565 #ifdef EXPENSIVE_CHECKS 3566 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3567 LI->verify(*DT); 3568 #endif 3569 3570 return LoopVectorPreHeader; 3571 } 3572 3573 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3574 /* 3575 In this function we generate a new loop. The new loop will contain 3576 the vectorized instructions while the old loop will continue to run the 3577 scalar remainder. 3578 3579 [ ] <-- loop iteration number check. 3580 / | 3581 / v 3582 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3583 | / | 3584 | / v 3585 || [ ] <-- vector pre header. 3586 |/ | 3587 | v 3588 | [ ] \ 3589 | [ ]_| <-- vector loop. 3590 | | 3591 | v 3592 | -[ ] <--- middle-block. 
3593 | / | 3594 | / v 3595 -|- >[ ] <--- new preheader. 3596 | | 3597 | v 3598 | [ ] \ 3599 | [ ]_| <-- old scalar loop to handle remainder. 3600 \ | 3601 \ v 3602 >[ ] <-- exit block. 3603 ... 3604 */ 3605 3606 // Get the metadata of the original loop before it gets modified. 3607 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3608 3609 // Create an empty vector loop, and prepare basic blocks for the runtime 3610 // checks. 3611 Loop *Lp = createVectorLoopSkeleton(""); 3612 3613 // Now, compare the new count to zero. If it is zero skip the vector loop and 3614 // jump to the scalar loop. This check also covers the case where the 3615 // backedge-taken count is uint##_max: adding one to it will overflow leading 3616 // to an incorrect trip count of zero. In this (rare) case we will also jump 3617 // to the scalar loop. 3618 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3619 3620 // Generate the code to check any assumptions that we've made for SCEV 3621 // expressions. 3622 emitSCEVChecks(Lp, LoopScalarPreHeader); 3623 3624 // Generate the code that checks in runtime if arrays overlap. We put the 3625 // checks into a separate block to make the more common case of few elements 3626 // faster. 3627 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3628 3629 // Some loops have a single integer induction variable, while other loops 3630 // don't. One example is c++ iterators that often have multiple pointer 3631 // induction variables. In the code below we also support a case where we 3632 // don't have a single induction variable. 3633 // 3634 // We try to obtain an induction variable from the original loop as hard 3635 // as possible. However if we don't find one that: 3636 // - is an integer 3637 // - counts from zero, stepping by one 3638 // - is the size of the widest induction variable type 3639 // then we create a new one. 3640 OldInduction = Legal->getPrimaryInduction(); 3641 Type *IdxTy = Legal->getWidestInductionType(); 3642 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3643 // The loop step is equal to the vectorization factor (num of SIMD elements) 3644 // times the unroll factor (num of SIMD instructions). 3645 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3646 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3647 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3648 Induction = 3649 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3650 getDebugLocFromInstOrOperands(OldInduction)); 3651 3652 // Emit phis for the new starting index of the scalar loop. 3653 createInductionResumeValues(Lp, CountRoundDown); 3654 3655 return completeLoopSkeleton(Lp, OrigLoopID); 3656 } 3657 3658 // Fix up external users of the induction variable. At this point, we are 3659 // in LCSSA form, with all external PHIs that use the IV having one input value, 3660 // coming from the remainder loop. We need those PHIs to also have a correct 3661 // value for the IV when arriving directly from the middle block. 3662 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3663 const InductionDescriptor &II, 3664 Value *CountRoundDown, Value *EndValue, 3665 BasicBlock *MiddleBlock) { 3666 // There are two kinds of external IV usages - those that use the value 3667 // computed in the last iteration (the PHI) and those that use the penultimate 3668 // value (the value that feeds into the phi from the loop latch). 3669 // We allow both, but they, obviously, have different values. 
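// Example for illustration only: for `for (i = 0; i < n; ++i)`, an external
// use of the latch value i+1 sees EndValue (for this canonical IV, the vector
// trip count), whereas an external use of the phi i itself needs the
// penultimate value Start + Step * (CRD - 1), materialized below as
// "ind.escape".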
3670 3671 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3672 3673 DenseMap<Value *, Value *> MissingVals; 3674 3675 // An external user of the last iteration's value should see the value that 3676 // the remainder loop uses to initialize its own IV. 3677 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3678 for (User *U : PostInc->users()) { 3679 Instruction *UI = cast<Instruction>(U); 3680 if (!OrigLoop->contains(UI)) { 3681 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3682 MissingVals[UI] = EndValue; 3683 } 3684 } 3685 3686 // An external user of the penultimate value need to see EndValue - Step. 3687 // The simplest way to get this is to recompute it from the constituent SCEVs, 3688 // that is Start + (Step * (CRD - 1)). 3689 for (User *U : OrigPhi->users()) { 3690 auto *UI = cast<Instruction>(U); 3691 if (!OrigLoop->contains(UI)) { 3692 const DataLayout &DL = 3693 OrigLoop->getHeader()->getModule()->getDataLayout(); 3694 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3695 3696 IRBuilder<> B(MiddleBlock->getTerminator()); 3697 3698 // Fast-math-flags propagate from the original induction instruction. 3699 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3700 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3701 3702 Value *CountMinusOne = B.CreateSub( 3703 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3704 Value *CMO = 3705 !II.getStep()->getType()->isIntegerTy() 3706 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3707 II.getStep()->getType()) 3708 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3709 CMO->setName("cast.cmo"); 3710 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3711 Escape->setName("ind.escape"); 3712 MissingVals[UI] = Escape; 3713 } 3714 } 3715 3716 for (auto &I : MissingVals) { 3717 PHINode *PHI = cast<PHINode>(I.first); 3718 // One corner case we have to handle is two IVs "chasing" each-other, 3719 // that is %IV2 = phi [...], [ %IV1, %latch ] 3720 // In this case, if IV1 has an external use, we need to avoid adding both 3721 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3722 // don't already have an incoming value for the middle block. 3723 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3724 PHI->addIncoming(I.second, MiddleBlock); 3725 } 3726 } 3727 3728 namespace { 3729 3730 struct CSEDenseMapInfo { 3731 static bool canHandle(const Instruction *I) { 3732 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3733 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3734 } 3735 3736 static inline Instruction *getEmptyKey() { 3737 return DenseMapInfo<Instruction *>::getEmptyKey(); 3738 } 3739 3740 static inline Instruction *getTombstoneKey() { 3741 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3742 } 3743 3744 static unsigned getHashValue(const Instruction *I) { 3745 assert(canHandle(I) && "Unknown instruction!"); 3746 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3747 I->value_op_end())); 3748 } 3749 3750 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3751 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3752 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3753 return LHS == RHS; 3754 return LHS->isIdenticalTo(RHS); 3755 } 3756 }; 3757 3758 } // end anonymous namespace 3759 3760 ///Perform cse of induction variable instructions. 
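/// A minimal sketch of the effect (names assumed): if unrolling produced two
/// identical `getelementptr i32, i32* %p, i64 %idx` instructions in the
/// vector body, the second is replaced by the first and erased.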
3761 static void cse(BasicBlock *BB) { 3762 // Perform simple cse. 3763 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3764 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3765 Instruction *In = &*I++; 3766 3767 if (!CSEDenseMapInfo::canHandle(In)) 3768 continue; 3769 3770 // Check if we can replace this instruction with any of the 3771 // visited instructions. 3772 if (Instruction *V = CSEMap.lookup(In)) { 3773 In->replaceAllUsesWith(V); 3774 In->eraseFromParent(); 3775 continue; 3776 } 3777 3778 CSEMap[In] = In; 3779 } 3780 } 3781 3782 InstructionCost 3783 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3784 bool &NeedToScalarize) const { 3785 Function *F = CI->getCalledFunction(); 3786 Type *ScalarRetTy = CI->getType(); 3787 SmallVector<Type *, 4> Tys, ScalarTys; 3788 for (auto &ArgOp : CI->arg_operands()) 3789 ScalarTys.push_back(ArgOp->getType()); 3790 3791 // Estimate cost of scalarized vector call. The source operands are assumed 3792 // to be vectors, so we need to extract individual elements from there, 3793 // execute VF scalar calls, and then gather the result into the vector return 3794 // value. 3795 InstructionCost ScalarCallCost = 3796 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3797 if (VF.isScalar()) 3798 return ScalarCallCost; 3799 3800 // Compute corresponding vector type for return value and arguments. 3801 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3802 for (Type *ScalarTy : ScalarTys) 3803 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3804 3805 // Compute costs of unpacking argument values for the scalar calls and 3806 // packing the return values to a vector. 3807 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3808 3809 InstructionCost Cost = 3810 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3811 3812 // If we can't emit a vector call for this function, then the currently found 3813 // cost is the cost we need to return. 3814 NeedToScalarize = true; 3815 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3816 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3817 3818 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3819 return Cost; 3820 3821 // If the corresponding vector cost is cheaper, return its cost. 
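// Worked example with assumed numbers: for VF = 4, a scalar call cost of 10
// and a scalarization overhead of 12, the scalarized estimate is
// 4 * 10 + 12 = 52; if a vector library variant exists and costs 20, we
// return 20 and clear NeedToScalarize.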
3822 InstructionCost VectorCallCost = 3823 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3824 if (VectorCallCost < Cost) { 3825 NeedToScalarize = false; 3826 Cost = VectorCallCost; 3827 } 3828 return Cost; 3829 } 3830 3831 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3832 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3833 return Elt; 3834 return VectorType::get(Elt, VF); 3835 } 3836 3837 InstructionCost 3838 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3839 ElementCount VF) const { 3840 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3841 assert(ID && "Expected intrinsic call!"); 3842 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3843 FastMathFlags FMF; 3844 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3845 FMF = FPMO->getFastMathFlags(); 3846 3847 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); 3848 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3849 SmallVector<Type *> ParamTys; 3850 std::transform(FTy->param_begin(), FTy->param_end(), 3851 std::back_inserter(ParamTys), 3852 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3853 3854 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3855 dyn_cast<IntrinsicInst>(CI)); 3856 return TTI.getIntrinsicInstrCost(CostAttrs, 3857 TargetTransformInfo::TCK_RecipThroughput); 3858 } 3859 3860 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3861 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3862 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3863 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3864 } 3865 3866 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3867 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3868 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3869 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3870 } 3871 3872 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3873 // For every instruction `I` in MinBWs, truncate the operands, create a 3874 // truncated version of `I` and reextend its result. InstCombine runs 3875 // later and will remove any ext/trunc pairs. 3876 SmallPtrSet<Value *, 4> Erased; 3877 for (const auto &KV : Cost->getMinimalBitwidths()) { 3878 // If the value wasn't vectorized, we must maintain the original scalar 3879 // type. The absence of the value from State indicates that it 3880 // wasn't vectorized. 3881 VPValue *Def = State.Plan->getVPValue(KV.first); 3882 if (!State.hasAnyVectorValue(Def)) 3883 continue; 3884 for (unsigned Part = 0; Part < UF; ++Part) { 3885 Value *I = State.get(Def, Part); 3886 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3887 continue; 3888 Type *OriginalTy = I->getType(); 3889 Type *ScalarTruncatedTy = 3890 IntegerType::get(OriginalTy->getContext(), KV.second); 3891 auto *TruncatedTy = FixedVectorType::get( 3892 ScalarTruncatedTy, 3893 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3894 if (TruncatedTy == OriginalTy) 3895 continue; 3896 3897 IRBuilder<> B(cast<Instruction>(I)); 3898 auto ShrinkOperand = [&](Value *V) -> Value * { 3899 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3900 if (ZI->getSrcTy() == TruncatedTy) 3901 return ZI->getOperand(0); 3902 return B.CreateZExtOrTrunc(V, TruncatedTy); 3903 }; 3904 3905 // The actual instruction modification depends on the instruction type, 3906 // unfortunately. 
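// Illustrative case (types assumed): a <4 x i32> add whose result is known to
// need only 8 bits is rebuilt as truncations of both operands to <4 x i8>, a
// <4 x i8> add, and a zext of the result back to <4 x i32>; InstCombine is
// expected to remove redundant trunc/ext pairs afterwards.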
3907 Value *NewI = nullptr; 3908 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3909 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3910 ShrinkOperand(BO->getOperand(1))); 3911 3912 // Any wrapping introduced by shrinking this operation shouldn't be 3913 // considered undefined behavior. So, we can't unconditionally copy 3914 // arithmetic wrapping flags to NewI. 3915 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3916 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3917 NewI = 3918 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3919 ShrinkOperand(CI->getOperand(1))); 3920 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3921 NewI = B.CreateSelect(SI->getCondition(), 3922 ShrinkOperand(SI->getTrueValue()), 3923 ShrinkOperand(SI->getFalseValue())); 3924 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3925 switch (CI->getOpcode()) { 3926 default: 3927 llvm_unreachable("Unhandled cast!"); 3928 case Instruction::Trunc: 3929 NewI = ShrinkOperand(CI->getOperand(0)); 3930 break; 3931 case Instruction::SExt: 3932 NewI = B.CreateSExtOrTrunc( 3933 CI->getOperand(0), 3934 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3935 break; 3936 case Instruction::ZExt: 3937 NewI = B.CreateZExtOrTrunc( 3938 CI->getOperand(0), 3939 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3940 break; 3941 } 3942 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3943 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3944 ->getNumElements(); 3945 auto *O0 = B.CreateZExtOrTrunc( 3946 SI->getOperand(0), 3947 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3948 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3949 ->getNumElements(); 3950 auto *O1 = B.CreateZExtOrTrunc( 3951 SI->getOperand(1), 3952 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3953 3954 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3955 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3956 // Don't do anything with the operands, just extend the result. 3957 continue; 3958 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3959 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3960 ->getNumElements(); 3961 auto *O0 = B.CreateZExtOrTrunc( 3962 IE->getOperand(0), 3963 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3964 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3965 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3966 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3967 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3968 ->getNumElements(); 3969 auto *O0 = B.CreateZExtOrTrunc( 3970 EE->getOperand(0), 3971 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3972 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3973 } else { 3974 // If we don't know what to do, be conservative and don't do anything. 3975 continue; 3976 } 3977 3978 // Lastly, extend the result. 3979 NewI->takeName(cast<Instruction>(I)); 3980 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3981 I->replaceAllUsesWith(Res); 3982 cast<Instruction>(I)->eraseFromParent(); 3983 Erased.insert(I); 3984 State.reset(Def, Res, Part); 3985 } 3986 } 3987 3988 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3989 for (const auto &KV : Cost->getMinimalBitwidths()) { 3990 // If the value wasn't vectorized, we must maintain the original scalar 3991 // type. The absence of the value from State indicates that it 3992 // wasn't vectorized. 
3993 VPValue *Def = State.Plan->getVPValue(KV.first); 3994 if (!State.hasAnyVectorValue(Def)) 3995 continue; 3996 for (unsigned Part = 0; Part < UF; ++Part) { 3997 Value *I = State.get(Def, Part); 3998 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3999 if (Inst && Inst->use_empty()) { 4000 Value *NewI = Inst->getOperand(0); 4001 Inst->eraseFromParent(); 4002 State.reset(Def, NewI, Part); 4003 } 4004 } 4005 } 4006 } 4007 4008 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4009 // Insert truncates and extends for any truncated instructions as hints to 4010 // InstCombine. 4011 if (VF.isVector()) 4012 truncateToMinimalBitwidths(State); 4013 4014 // Fix widened non-induction PHIs by setting up the PHI operands. 4015 if (OrigPHIsToFix.size()) { 4016 assert(EnableVPlanNativePath && 4017 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4018 fixNonInductionPHIs(State); 4019 } 4020 4021 // At this point every instruction in the original loop is widened to a 4022 // vector form. Now we need to fix the recurrences in the loop. These PHI 4023 // nodes are currently empty because we did not want to introduce cycles. 4024 // This is the second stage of vectorizing recurrences. 4025 fixCrossIterationPHIs(State); 4026 4027 // Forget the original basic block. 4028 PSE.getSE()->forgetLoop(OrigLoop); 4029 4030 // Fix-up external users of the induction variables. 4031 for (auto &Entry : Legal->getInductionVars()) 4032 fixupIVUsers(Entry.first, Entry.second, 4033 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4034 IVEndValues[Entry.first], LoopMiddleBlock); 4035 4036 fixLCSSAPHIs(State); 4037 for (Instruction *PI : PredicatedInstructions) 4038 sinkScalarOperands(&*PI); 4039 4040 // Remove redundant induction instructions. 4041 cse(LoopVectorBody); 4042 4043 // Set/update profile weights for the vector and remainder loops as original 4044 // loop iterations are now distributed among them. Note that original loop 4045 // represented by LoopScalarBody becomes remainder loop after vectorization. 4046 // 4047 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 4048 // end up getting slightly roughened result but that should be OK since 4049 // profile is not inherently precise anyway. Note also possible bypass of 4050 // vector code caused by legality checks is ignored, assigning all the weight 4051 // to the vector loop, optimistically. 4052 // 4053 // For scalable vectorization we can't know at compile time how many iterations 4054 // of the loop are handled in one vector iteration, so instead assume a pessimistic 4055 // vscale of '1'. 4056 setProfileInfoAfterUnrolling( 4057 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 4058 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 4059 } 4060 4061 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 4062 // In order to support recurrences we need to be able to vectorize Phi nodes. 4063 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4064 // stage #2: We now need to fix the recurrences by adding incoming edges to 4065 // the currently empty PHI nodes. At this point every instruction in the 4066 // original loop is widened to a vector form so we can use them to construct 4067 // the incoming edges. 4068 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 4069 // Handle first-order recurrences and reductions that need to be fixed. 
4070 if (Legal->isFirstOrderRecurrence(&Phi)) 4071 fixFirstOrderRecurrence(&Phi, State); 4072 else if (Legal->isReductionVariable(&Phi)) 4073 fixReduction(&Phi, State); 4074 } 4075 } 4076 4077 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi, 4078 VPTransformState &State) { 4079 // This is the second phase of vectorizing first-order recurrences. An 4080 // overview of the transformation is described below. Suppose we have the 4081 // following loop. 4082 // 4083 // for (int i = 0; i < n; ++i) 4084 // b[i] = a[i] - a[i - 1]; 4085 // 4086 // There is a first-order recurrence on "a". For this loop, the shorthand 4087 // scalar IR looks like: 4088 // 4089 // scalar.ph: 4090 // s_init = a[-1] 4091 // br scalar.body 4092 // 4093 // scalar.body: 4094 // i = phi [0, scalar.ph], [i+1, scalar.body] 4095 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4096 // s2 = a[i] 4097 // b[i] = s2 - s1 4098 // br cond, scalar.body, ... 4099 // 4100 // In this example, s1 is a recurrence because it's value depends on the 4101 // previous iteration. In the first phase of vectorization, we created a 4102 // temporary value for s1. We now complete the vectorization and produce the 4103 // shorthand vector IR shown below (for VF = 4, UF = 1). 4104 // 4105 // vector.ph: 4106 // v_init = vector(..., ..., ..., a[-1]) 4107 // br vector.body 4108 // 4109 // vector.body 4110 // i = phi [0, vector.ph], [i+4, vector.body] 4111 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4112 // v2 = a[i, i+1, i+2, i+3]; 4113 // v3 = vector(v1(3), v2(0, 1, 2)) 4114 // b[i, i+1, i+2, i+3] = v2 - v3 4115 // br cond, vector.body, middle.block 4116 // 4117 // middle.block: 4118 // x = v2(3) 4119 // br scalar.ph 4120 // 4121 // scalar.ph: 4122 // s_init = phi [x, middle.block], [a[-1], otherwise] 4123 // br scalar.body 4124 // 4125 // After execution completes the vector loop, we extract the next value of 4126 // the recurrence (x) to use as the initial value in the scalar loop. 4127 4128 // Get the original loop preheader and single loop latch. 4129 auto *Preheader = OrigLoop->getLoopPreheader(); 4130 auto *Latch = OrigLoop->getLoopLatch(); 4131 4132 // Get the initial and previous values of the scalar recurrence. 4133 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4134 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4135 4136 // Create a vector from the initial value. 4137 auto *VectorInit = ScalarInit; 4138 if (VF.isVector()) { 4139 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4140 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 4141 VectorInit = Builder.CreateInsertElement( 4142 PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 4143 Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init"); 4144 } 4145 4146 VPValue *PhiDef = State.Plan->getVPValue(Phi); 4147 VPValue *PreviousDef = State.Plan->getVPValue(Previous); 4148 // We constructed a temporary phi node in the first phase of vectorization. 4149 // This phi node will eventually be deleted. 4150 Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0))); 4151 4152 // Create a phi node for the new recurrence. The current value will either be 4153 // the initial value inserted into a vector or loop-varying vector value. 4154 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4155 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4156 4157 // Get the vectorized previous value of the last part UF - 1. 
It appears last 4158 // among all unrolled iterations, due to the order of their construction. 4159 Value *PreviousLastPart = State.get(PreviousDef, UF - 1); 4160 4161 // Find and set the insertion point after the previous value if it is an 4162 // instruction. 4163 BasicBlock::iterator InsertPt; 4164 // Note that the previous value may have been constant-folded so it is not 4165 // guaranteed to be an instruction in the vector loop. 4166 // FIXME: Loop invariant values do not form recurrences. We should deal with 4167 // them earlier. 4168 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 4169 InsertPt = LoopVectorBody->getFirstInsertionPt(); 4170 else { 4171 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 4172 if (isa<PHINode>(PreviousLastPart)) 4173 // If the previous value is a phi node, we should insert after all the phi 4174 // nodes in the block containing the PHI to avoid breaking basic block 4175 // verification. Note that the basic block may be different to 4176 // LoopVectorBody, in case we predicate the loop. 4177 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 4178 else 4179 InsertPt = ++PreviousInst->getIterator(); 4180 } 4181 Builder.SetInsertPoint(&*InsertPt); 4182 4183 // We will construct a vector for the recurrence by combining the values for 4184 // the current and previous iterations. This is the required shuffle mask. 4185 assert(!VF.isScalable()); 4186 SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue()); 4187 ShuffleMask[0] = VF.getKnownMinValue() - 1; 4188 for (unsigned I = 1; I < VF.getKnownMinValue(); ++I) 4189 ShuffleMask[I] = I + VF.getKnownMinValue() - 1; 4190 4191 // The vector from which to take the initial value for the current iteration 4192 // (actual or unrolled). Initially, this is the vector phi node. 4193 Value *Incoming = VecPhi; 4194 4195 // Shuffle the current and previous vector and update the vector parts. 4196 for (unsigned Part = 0; Part < UF; ++Part) { 4197 Value *PreviousPart = State.get(PreviousDef, Part); 4198 Value *PhiPart = State.get(PhiDef, Part); 4199 auto *Shuffle = 4200 VF.isVector() 4201 ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask) 4202 : Incoming; 4203 PhiPart->replaceAllUsesWith(Shuffle); 4204 cast<Instruction>(PhiPart)->eraseFromParent(); 4205 State.reset(PhiDef, Shuffle, Part); 4206 Incoming = PreviousPart; 4207 } 4208 4209 // Fix the latch value of the new recurrence in the vector loop. 4210 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4211 4212 // Extract the last vector element in the middle block. This will be the 4213 // initial value for the recurrence when jumping to the scalar loop. 4214 auto *ExtractForScalar = Incoming; 4215 if (VF.isVector()) { 4216 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4217 ExtractForScalar = Builder.CreateExtractElement( 4218 ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1), 4219 "vector.recur.extract"); 4220 } 4221 // Extract the second last element in the middle block if the 4222 // Phi is used outside the loop. We need to extract the phi itself 4223 // and not the last element (the phi update in the current iteration). This 4224 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4225 // when the scalar loop is not run at all. 
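  // Continuing the example above (b[i] = a[i] - a[i-1]) with VF = 4, UF = 1
  // and the vector loop covering all iterations: the last vector iteration
  // loads <a[n-4], a[n-3], a[n-2], a[n-1]>. The scalar loop would resume with
  // a[n-1] (lane VF - 1), whereas a phi user in the exit block needs the value
  // the phi held in the final iteration, a[n-2] (lane VF - 2).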
4226 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4227 if (VF.isVector()) 4228 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4229 Incoming, Builder.getInt32(VF.getKnownMinValue() - 2), 4230 "vector.recur.extract.for.phi"); 4231 // When the loop is unrolled without vectorizing, initialize 4232 // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to 4233 // `Incoming`. This is analogous to the vectorized case above: extracting the 4234 // second last element when VF > 1. 4235 else if (UF > 1) 4236 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4237 4238 // Fix the initial value of the original recurrence in the scalar loop. 4239 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4240 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4241 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4242 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4243 Start->addIncoming(Incoming, BB); 4244 } 4245 4246 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4247 Phi->setName("scalar.recur"); 4248 4249 // Finally, fix users of the recurrence outside the loop. The users will need 4250 // either the last value of the scalar recurrence or the last value of the 4251 // vector recurrence we extracted in the middle block. Since the loop is in 4252 // LCSSA form, we just need to find all the phi nodes for the original scalar 4253 // recurrence in the exit block, and then add an edge for the middle block. 4254 // Note that LCSSA does not imply single entry when the original scalar loop 4255 // had multiple exiting edges (as we always run the last iteration in the 4256 // scalar epilogue); in that case, the exiting path through middle will be 4257 // dynamically dead and the value picked for the phi doesn't matter. 4258 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4259 if (any_of(LCSSAPhi.incoming_values(), 4260 [Phi](Value *V) { return V == Phi; })) 4261 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4262 } 4263 4264 static bool useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4265 return EnableStrictReductions && RdxDesc.isOrdered(); 4266 } 4267 4268 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) { 4269 // Get its reduction variable descriptor. 4270 assert(Legal->isReductionVariable(Phi) && 4271 "Unable to find the reduction variable"); 4272 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4273 4274 RecurKind RK = RdxDesc.getRecurrenceKind(); 4275 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4276 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4277 setDebugLocFromInst(Builder, ReductionStartValue); 4278 bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi); 4279 4280 VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst); 4281 // This is the vector-clone of the value that leaves the loop. 4282 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 4283 4284 // Wrap flags are in general invalid after vectorization, clear them. 4285 clearReductionWrapFlags(RdxDesc, State); 4286 4287 // Fix the vector-loop phi. 4288 4289 // Reductions do not have to start at zero. They can start with 4290 // any loop invariant values.
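  // For example, for an integer add reduction 'sum = init; sum += a[i]' with
  // VF = 4, the vector phi was started at <init, 0, 0, 0> in the preheader, so
  // the invariant start value is accounted for exactly once; below we only add
  // the missing back-edge value produced by the widened loop body.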
4291 BasicBlock *Latch = OrigLoop->getLoopLatch(); 4292 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 4293 4294 for (unsigned Part = 0; Part < UF; ++Part) { 4295 Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part); 4296 Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part); 4297 if (IsInLoopReductionPhi && useOrderedReductions(RdxDesc) && 4298 State.VF.isVector()) 4299 Val = State.get(State.Plan->getVPValue(LoopVal), UF - 1); 4300 cast<PHINode>(VecRdxPhi) 4301 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4302 } 4303 4304 // Before each round, move the insertion point right between 4305 // the PHIs and the values we are going to write. 4306 // This allows us to write both PHINodes and the extractelement 4307 // instructions. 4308 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4309 4310 setDebugLocFromInst(Builder, LoopExitInst); 4311 4312 Type *PhiTy = Phi->getType(); 4313 // If tail is folded by masking, the vector value to leave the loop should be 4314 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4315 // instead of the former. For an inloop reduction the reduction will already 4316 // be predicated, and does not need to be handled here. 4317 if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { 4318 for (unsigned Part = 0; Part < UF; ++Part) { 4319 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4320 Value *Sel = nullptr; 4321 for (User *U : VecLoopExitInst->users()) { 4322 if (isa<SelectInst>(U)) { 4323 assert(!Sel && "Reduction exit feeding two selects"); 4324 Sel = U; 4325 } else 4326 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4327 } 4328 assert(Sel && "Reduction exit feeds no select"); 4329 State.reset(LoopExitInstDef, Sel, Part); 4330 4331 // If the target can create a predicated operator for the reduction at no 4332 // extra cost in the loop (for example a predicated vadd), it can be 4333 // cheaper for the select to remain in the loop than be sunk out of it, 4334 // and so use the select value for the phi instead of the old 4335 // LoopExitValue. 4336 if (PreferPredicatedReductionSelect || 4337 TTI->preferPredicatedReductionSelect( 4338 RdxDesc.getOpcode(), PhiTy, 4339 TargetTransformInfo::ReductionFlags())) { 4340 auto *VecRdxPhi = 4341 cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part)); 4342 VecRdxPhi->setIncomingValueForBlock( 4343 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4344 } 4345 } 4346 } 4347 4348 // If the vector reduction can be performed in a smaller type, we truncate 4349 // then extend the loop exit value to enable InstCombine to evaluate the 4350 // entire expression in the smaller type. 4351 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4352 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4353 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4354 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4355 Builder.SetInsertPoint( 4356 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4357 VectorParts RdxParts(UF); 4358 for (unsigned Part = 0; Part < UF; ++Part) { 4359 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4360 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4361 Value *Extnd = RdxDesc.isSigned() ? 
Builder.CreateSExt(Trunc, VecTy) 4362 : Builder.CreateZExt(Trunc, VecTy); 4363 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4364 UI != RdxParts[Part]->user_end();) 4365 if (*UI != Trunc) { 4366 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4367 RdxParts[Part] = Extnd; 4368 } else { 4369 ++UI; 4370 } 4371 } 4372 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4373 for (unsigned Part = 0; Part < UF; ++Part) { 4374 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4375 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4376 } 4377 } 4378 4379 // Reduce all of the unrolled parts into a single vector. 4380 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4381 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4382 4383 // The middle block terminator has already been assigned a DebugLoc here (the 4384 // OrigLoop's single latch terminator). We want the whole middle block to 4385 // appear to execute on this line because: (a) it is all compiler generated, 4386 // (b) these instructions are always executed after evaluating the latch 4387 // conditional branch, and (c) other passes may add new predecessors which 4388 // terminate on this line. This is the easiest way to ensure we don't 4389 // accidentally cause an extra step back into the loop while debugging. 4390 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4391 if (IsInLoopReductionPhi && useOrderedReductions(RdxDesc)) 4392 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4393 else { 4394 // Floating-point operations should have some FMF to enable the reduction. 4395 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4396 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4397 for (unsigned Part = 1; Part < UF; ++Part) { 4398 Value *RdxPart = State.get(LoopExitInstDef, Part); 4399 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4400 ReducedPartRdx = Builder.CreateBinOp( 4401 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4402 } else { 4403 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4404 } 4405 } 4406 } 4407 4408 // Create the reduction after the loop. Note that inloop reductions create the 4409 // target reduction in the loop using a Reduction recipe. 4410 if (VF.isVector() && !IsInLoopReductionPhi) { 4411 ReducedPartRdx = 4412 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4413 // If the reduction can be performed in a smaller type, we need to extend 4414 // the reduction to the wider type before we branch to the original loop. 4415 if (PhiTy != RdxDesc.getRecurrenceType()) 4416 ReducedPartRdx = RdxDesc.isSigned() 4417 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4418 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4419 } 4420 4421 // Create a phi node that merges control-flow from the backedge-taken check 4422 // block and the middle block. 4423 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4424 LoopScalarPreHeader->getTerminator()); 4425 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4426 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4427 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4428 4429 // Now, we need to fix the users of the reduction variable 4430 // inside and outside of the scalar remainder loop. 4431 4432 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4433 // in the exit blocks. See comment on analogous loop in 4434 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
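  // For example, an exit-block phi such as '%r = phi i32 [ %sum.next, %loop ]'
  // (illustrative names) whose incoming value is the reduction's loop-exit
  // instruction receives an additional incoming value from the middle block
  // carrying the ReducedPartRdx computed above.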
4435 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4436 if (any_of(LCSSAPhi.incoming_values(), 4437 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4438 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4439 4440 // Fix the scalar loop reduction variable with the incoming reduction sum 4441 // from the vector body and from the backedge value. 4442 int IncomingEdgeBlockIdx = 4443 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4444 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4445 // Pick the other block. 4446 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4447 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4448 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4449 } 4450 4451 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc, 4452 VPTransformState &State) { 4453 RecurKind RK = RdxDesc.getRecurrenceKind(); 4454 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4455 return; 4456 4457 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4458 assert(LoopExitInstr && "null loop exit instruction"); 4459 SmallVector<Instruction *, 8> Worklist; 4460 SmallPtrSet<Instruction *, 8> Visited; 4461 Worklist.push_back(LoopExitInstr); 4462 Visited.insert(LoopExitInstr); 4463 4464 while (!Worklist.empty()) { 4465 Instruction *Cur = Worklist.pop_back_val(); 4466 if (isa<OverflowingBinaryOperator>(Cur)) 4467 for (unsigned Part = 0; Part < UF; ++Part) { 4468 Value *V = State.get(State.Plan->getVPValue(Cur), Part); 4469 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4470 } 4471 4472 for (User *U : Cur->users()) { 4473 Instruction *UI = cast<Instruction>(U); 4474 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4475 Visited.insert(UI).second) 4476 Worklist.push_back(UI); 4477 } 4478 } 4479 } 4480 4481 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4482 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4483 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4484 // Some phis were already hand updated by the reduction and recurrence 4485 // code above, leave them alone. 4486 continue; 4487 4488 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4489 // Non-instruction incoming values will have only one value. 4490 4491 VPLane Lane = VPLane::getFirstLane(); 4492 if (isa<Instruction>(IncomingValue) && 4493 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4494 VF)) 4495 Lane = VPLane::getLastLaneForVF(VF); 4496 4497 // Can be a loop invariant incoming value or the last scalar value to be 4498 // extracted from the vectorized loop. 4499 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4500 Value *lastIncomingValue = 4501 OrigLoop->isLoopInvariant(IncomingValue) 4502 ? IncomingValue 4503 : State.get(State.Plan->getVPValue(IncomingValue), 4504 VPIteration(UF - 1, Lane)); 4505 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4506 } 4507 } 4508 4509 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4510 // The basic block and loop containing the predicated instruction. 4511 auto *PredBB = PredInst->getParent(); 4512 auto *VectorLoop = LI->getLoopFor(PredBB); 4513 4514 // Initialize a worklist with the operands of the predicated instruction. 4515 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4516 4517 // Holds instructions that we need to analyze again. An instruction may be 4518 // reanalyzed if we don't yet know if we can sink it or not. 
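  // For example, an address computation used both by the predicated memory
  // access and by another scalarized instruction cannot be sunk on the first
  // pass; once that other user has itself been sunk into the predicated block,
  // revisiting the address computation may allow it to be sunk as well.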
4519 SmallVector<Instruction *, 8> InstsToReanalyze; 4520 4521 // Returns true if a given use occurs in the predicated block. Phi nodes use 4522 // their operands in their corresponding predecessor blocks. 4523 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4524 auto *I = cast<Instruction>(U.getUser()); 4525 BasicBlock *BB = I->getParent(); 4526 if (auto *Phi = dyn_cast<PHINode>(I)) 4527 BB = Phi->getIncomingBlock( 4528 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4529 return BB == PredBB; 4530 }; 4531 4532 // Iteratively sink the scalarized operands of the predicated instruction 4533 // into the block we created for it. When an instruction is sunk, it's 4534 // operands are then added to the worklist. The algorithm ends after one pass 4535 // through the worklist doesn't sink a single instruction. 4536 bool Changed; 4537 do { 4538 // Add the instructions that need to be reanalyzed to the worklist, and 4539 // reset the changed indicator. 4540 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4541 InstsToReanalyze.clear(); 4542 Changed = false; 4543 4544 while (!Worklist.empty()) { 4545 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4546 4547 // We can't sink an instruction if it is a phi node, is already in the 4548 // predicated block, is not in the loop, or may have side effects. 4549 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4550 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4551 continue; 4552 4553 // It's legal to sink the instruction if all its uses occur in the 4554 // predicated block. Otherwise, there's nothing to do yet, and we may 4555 // need to reanalyze the instruction. 4556 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4557 InstsToReanalyze.push_back(I); 4558 continue; 4559 } 4560 4561 // Move the instruction to the beginning of the predicated block, and add 4562 // it's operands to the worklist. 4563 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4564 Worklist.insert(I->op_begin(), I->op_end()); 4565 4566 // The sinking may have enabled other instructions to be sunk, so we will 4567 // need to iterate. 4568 Changed = true; 4569 } 4570 } while (Changed); 4571 } 4572 4573 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4574 for (PHINode *OrigPhi : OrigPHIsToFix) { 4575 VPWidenPHIRecipe *VPPhi = 4576 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4577 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4578 // Make sure the builder has a valid insert point. 4579 Builder.SetInsertPoint(NewPhi); 4580 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4581 VPValue *Inc = VPPhi->getIncomingValue(i); 4582 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4583 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4584 } 4585 } 4586 } 4587 4588 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4589 VPUser &Operands, unsigned UF, 4590 ElementCount VF, bool IsPtrLoopInvariant, 4591 SmallBitVector &IsIndexLoopInvariant, 4592 VPTransformState &State) { 4593 // Construct a vector GEP by widening the operands of the scalar GEP as 4594 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4595 // results in a vector of pointers when at least one operand of the GEP 4596 // is vector-typed. Thus, to keep the representation compact, we only use 4597 // vector-typed operands for loop-varying values. 
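  // For example, with VF = 4 a scalar GEP with a loop-invariant base and a
  // loop-varying index,
  //   %gep = getelementptr i32, i32* %base, i64 %idx
  // is widened (shorthand) to
  //   %gep = getelementptr i32, i32* %base, <4 x i64> %idx.vec
  // which produces a <4 x i32*> vector of pointers.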
4598 4599 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4600 // If we are vectorizing, but the GEP has only loop-invariant operands, 4601 // the GEP we build (by only using vector-typed operands for 4602 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4603 // produce a vector of pointers, we need to either arbitrarily pick an 4604 // operand to broadcast, or broadcast a clone of the original GEP. 4605 // Here, we broadcast a clone of the original. 4606 // 4607 // TODO: If at some point we decide to scalarize instructions having 4608 // loop-invariant operands, this special case will no longer be 4609 // required. We would add the scalarization decision to 4610 // collectLoopScalars() and teach getVectorValue() to broadcast 4611 // the lane-zero scalar value. 4612 auto *Clone = Builder.Insert(GEP->clone()); 4613 for (unsigned Part = 0; Part < UF; ++Part) { 4614 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4615 State.set(VPDef, EntryPart, Part); 4616 addMetadata(EntryPart, GEP); 4617 } 4618 } else { 4619 // If the GEP has at least one loop-varying operand, we are sure to 4620 // produce a vector of pointers. But if we are only unrolling, we want 4621 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4622 // produce with the code below will be scalar (if VF == 1) or vector 4623 // (otherwise). Note that for the unroll-only case, we still maintain 4624 // values in the vector mapping with initVector, as we do for other 4625 // instructions. 4626 for (unsigned Part = 0; Part < UF; ++Part) { 4627 // The pointer operand of the new GEP. If it's loop-invariant, we 4628 // won't broadcast it. 4629 auto *Ptr = IsPtrLoopInvariant 4630 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4631 : State.get(Operands.getOperand(0), Part); 4632 4633 // Collect all the indices for the new GEP. If any index is 4634 // loop-invariant, we won't broadcast it. 4635 SmallVector<Value *, 4> Indices; 4636 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4637 VPValue *Operand = Operands.getOperand(I); 4638 if (IsIndexLoopInvariant[I - 1]) 4639 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4640 else 4641 Indices.push_back(State.get(Operand, Part)); 4642 } 4643 4644 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4645 // but it should be a vector, otherwise. 4646 auto *NewGEP = 4647 GEP->isInBounds() 4648 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4649 Indices) 4650 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4651 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4652 "NewGEP is not a pointer vector"); 4653 State.set(VPDef, NewGEP, Part); 4654 addMetadata(NewGEP, GEP); 4655 } 4656 } 4657 } 4658 4659 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4660 RecurrenceDescriptor *RdxDesc, 4661 VPWidenPHIRecipe *PhiR, 4662 VPTransformState &State) { 4663 PHINode *P = cast<PHINode>(PN); 4664 if (EnableVPlanNativePath) { 4665 // Currently we enter here in the VPlan-native path for non-induction 4666 // PHIs where all control flow is uniform. We simply widen these PHIs. 4667 // Create a vector phi with no operands - the vector phi operands will be 4668 // set at the end of vector code generation. 4669 Type *VecTy = (State.VF.isScalar()) 4670 ? 
PN->getType() 4671 : VectorType::get(PN->getType(), State.VF); 4672 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4673 State.set(PhiR, VecPhi, 0); 4674 OrigPHIsToFix.push_back(P); 4675 4676 return; 4677 } 4678 4679 assert(PN->getParent() == OrigLoop->getHeader() && 4680 "Non-header phis should have been handled elsewhere"); 4681 4682 VPValue *StartVPV = PhiR->getStartValue(); 4683 Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr; 4684 // In order to support recurrences we need to be able to vectorize Phi nodes. 4685 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4686 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4687 // this value when we vectorize all of the instructions that use the PHI. 4688 if (RdxDesc || Legal->isFirstOrderRecurrence(P)) { 4689 Value *Iden = nullptr; 4690 bool ScalarPHI = 4691 (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN)); 4692 Type *VecTy = 4693 ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF); 4694 4695 if (RdxDesc) { 4696 assert(Legal->isReductionVariable(P) && StartV && 4697 "RdxDesc should only be set for reduction variables; in that case " 4698 "a StartV is also required"); 4699 RecurKind RK = RdxDesc->getRecurrenceKind(); 4700 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) { 4701 // MinMax reductions have the start value as their identity. 4702 if (ScalarPHI) { 4703 Iden = StartV; 4704 } else { 4705 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4706 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4707 StartV = Iden = 4708 Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident"); 4709 } 4710 } else { 4711 Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( 4712 RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags()); 4713 Iden = IdenC; 4714 4715 if (!ScalarPHI) { 4716 Iden = ConstantVector::getSplat(State.VF, IdenC); 4717 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4718 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4719 Constant *Zero = Builder.getInt32(0); 4720 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 4721 } 4722 } 4723 } 4724 4725 for (unsigned Part = 0; Part < State.UF; ++Part) { 4726 // This is phase one of vectorizing PHIs. 4727 Value *EntryPart = PHINode::Create( 4728 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4729 State.set(PhiR, EntryPart, Part); 4730 if (StartV) { 4731 // Make sure to add the reduction start value only to the 4732 // first unroll part. 4733 Value *StartVal = (Part == 0) ? StartV : Iden; 4734 cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); 4735 } 4736 } 4737 return; 4738 } 4739 4740 assert(!Legal->isReductionVariable(P) && 4741 "reductions should be handled above"); 4742 4743 setDebugLocFromInst(Builder, P); 4744 4745 // This PHINode must be an induction variable. 4746 // Make sure that we know about it. 4747 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4748 4749 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4750 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4751 4752 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4753 // which can be found from the original scalar operations.
4754 switch (II.getKind()) { 4755 case InductionDescriptor::IK_NoInduction: 4756 llvm_unreachable("Unknown induction"); 4757 case InductionDescriptor::IK_IntInduction: 4758 case InductionDescriptor::IK_FpInduction: 4759 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4760 case InductionDescriptor::IK_PtrInduction: { 4761 // Handle the pointer induction variable case. 4762 assert(P->getType()->isPointerTy() && "Unexpected type."); 4763 4764 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4765 // This is the normalized GEP that starts counting at zero. 4766 Value *PtrInd = 4767 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4768 // Determine the number of scalars we need to generate for each unroll 4769 // iteration. If the instruction is uniform, we only need to generate the 4770 // first lane. Otherwise, we generate all VF values. 4771 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4772 assert((IsUniform || !VF.isScalable()) && 4773 "Currently unsupported for scalable vectors"); 4774 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 4775 4776 Value *RuntimeVF = getRuntimeVF(Builder, PtrInd->getType(), VF); 4777 for (unsigned Part = 0; Part < UF; ++Part) { 4778 Value *PartStart = Builder.CreateMul( 4779 RuntimeVF, ConstantInt::get(PtrInd->getType(), Part)); 4780 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4781 Value *Idx = Builder.CreateAdd( 4782 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4783 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4784 Value *SclrGep = 4785 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4786 SclrGep->setName("next.gep"); 4787 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4788 } 4789 } 4790 return; 4791 } 4792 assert(isa<SCEVConstant>(II.getStep()) && 4793 "Induction step not a SCEV constant!"); 4794 Type *PhiType = II.getStep()->getType(); 4795 4796 // Build a pointer phi 4797 Value *ScalarStartValue = II.getStartValue(); 4798 Type *ScStValueType = ScalarStartValue->getType(); 4799 PHINode *NewPointerPhi = 4800 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4801 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4802 4803 // A pointer induction, performed by using a gep 4804 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4805 Instruction *InductionLoc = LoopLatch->getTerminator(); 4806 const SCEV *ScalarStep = II.getStep(); 4807 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4808 Value *ScalarStepValue = 4809 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4810 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4811 Value *NumUnrolledElems = 4812 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4813 Value *InductionGEP = GetElementPtrInst::Create( 4814 ScStValueType->getPointerElementType(), NewPointerPhi, 4815 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4816 InductionLoc); 4817 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4818 4819 // Create UF many actual address geps that use the pointer 4820 // phi as base and a vectorized version of the step value 4821 // (<step*0, ..., step*N>) as offset. 
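    // For example, with VF = 4, UF = 2 and a unit step this emits, in
    // shorthand (T being the pointee type):
    //   %gep0 = getelementptr T, T* %pointer.phi, <4 x i64> <0, 1, 2, 3>
    //   %gep1 = getelementptr T, T* %pointer.phi, <4 x i64> <4, 5, 6, 7>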
4822 for (unsigned Part = 0; Part < State.UF; ++Part) { 4823 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4824 Value *StartOffsetScalar = 4825 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4826 Value *StartOffset = 4827 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4828 // Create a vector of consecutive numbers from zero to VF. 4829 StartOffset = 4830 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4831 4832 Value *GEP = Builder.CreateGEP( 4833 ScStValueType->getPointerElementType(), NewPointerPhi, 4834 Builder.CreateMul( 4835 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4836 "vector.gep")); 4837 State.set(PhiR, GEP, Part); 4838 } 4839 } 4840 } 4841 } 4842 4843 /// A helper function for checking whether an integer division-related 4844 /// instruction may divide by zero (in which case it must be predicated if 4845 /// executed conditionally in the scalar code). 4846 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4847 /// Non-zero divisors that are non compile-time constants will not be 4848 /// converted into multiplication, so we will still end up scalarizing 4849 /// the division, but can do so w/o predication. 4850 static bool mayDivideByZero(Instruction &I) { 4851 assert((I.getOpcode() == Instruction::UDiv || 4852 I.getOpcode() == Instruction::SDiv || 4853 I.getOpcode() == Instruction::URem || 4854 I.getOpcode() == Instruction::SRem) && 4855 "Unexpected instruction"); 4856 Value *Divisor = I.getOperand(1); 4857 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4858 return !CInt || CInt->isZero(); 4859 } 4860 4861 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4862 VPUser &User, 4863 VPTransformState &State) { 4864 switch (I.getOpcode()) { 4865 case Instruction::Call: 4866 case Instruction::Br: 4867 case Instruction::PHI: 4868 case Instruction::GetElementPtr: 4869 case Instruction::Select: 4870 llvm_unreachable("This instruction is handled by a different recipe."); 4871 case Instruction::UDiv: 4872 case Instruction::SDiv: 4873 case Instruction::SRem: 4874 case Instruction::URem: 4875 case Instruction::Add: 4876 case Instruction::FAdd: 4877 case Instruction::Sub: 4878 case Instruction::FSub: 4879 case Instruction::FNeg: 4880 case Instruction::Mul: 4881 case Instruction::FMul: 4882 case Instruction::FDiv: 4883 case Instruction::FRem: 4884 case Instruction::Shl: 4885 case Instruction::LShr: 4886 case Instruction::AShr: 4887 case Instruction::And: 4888 case Instruction::Or: 4889 case Instruction::Xor: { 4890 // Just widen unops and binops. 4891 setDebugLocFromInst(Builder, &I); 4892 4893 for (unsigned Part = 0; Part < UF; ++Part) { 4894 SmallVector<Value *, 2> Ops; 4895 for (VPValue *VPOp : User.operands()) 4896 Ops.push_back(State.get(VPOp, Part)); 4897 4898 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4899 4900 if (auto *VecOp = dyn_cast<Instruction>(V)) 4901 VecOp->copyIRFlags(&I); 4902 4903 // Use this vector value for all users of the original instruction. 4904 State.set(Def, V, Part); 4905 addMetadata(V, &I); 4906 } 4907 4908 break; 4909 } 4910 case Instruction::ICmp: 4911 case Instruction::FCmp: { 4912 // Widen compares. Generate vector compares. 
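    // For example, with VF = 4 a scalar 'icmp slt i32 %a, %b' becomes
    // 'icmp slt <4 x i32> %a.vec, %b.vec' for each unroll part, producing a
    // <4 x i1> mask.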
4913 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4914 auto *Cmp = cast<CmpInst>(&I); 4915 setDebugLocFromInst(Builder, Cmp); 4916 for (unsigned Part = 0; Part < UF; ++Part) { 4917 Value *A = State.get(User.getOperand(0), Part); 4918 Value *B = State.get(User.getOperand(1), Part); 4919 Value *C = nullptr; 4920 if (FCmp) { 4921 // Propagate fast math flags. 4922 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4923 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4924 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4925 } else { 4926 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4927 } 4928 State.set(Def, C, Part); 4929 addMetadata(C, &I); 4930 } 4931 4932 break; 4933 } 4934 4935 case Instruction::ZExt: 4936 case Instruction::SExt: 4937 case Instruction::FPToUI: 4938 case Instruction::FPToSI: 4939 case Instruction::FPExt: 4940 case Instruction::PtrToInt: 4941 case Instruction::IntToPtr: 4942 case Instruction::SIToFP: 4943 case Instruction::UIToFP: 4944 case Instruction::Trunc: 4945 case Instruction::FPTrunc: 4946 case Instruction::BitCast: { 4947 auto *CI = cast<CastInst>(&I); 4948 setDebugLocFromInst(Builder, CI); 4949 4950 /// Vectorize casts. 4951 Type *DestTy = 4952 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4953 4954 for (unsigned Part = 0; Part < UF; ++Part) { 4955 Value *A = State.get(User.getOperand(0), Part); 4956 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4957 State.set(Def, Cast, Part); 4958 addMetadata(Cast, &I); 4959 } 4960 break; 4961 } 4962 default: 4963 // This instruction is not vectorized by simple widening. 4964 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4965 llvm_unreachable("Unhandled instruction!"); 4966 } // end of switch. 4967 } 4968 4969 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4970 VPUser &ArgOperands, 4971 VPTransformState &State) { 4972 assert(!isa<DbgInfoIntrinsic>(I) && 4973 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4974 setDebugLocFromInst(Builder, &I); 4975 4976 Module *M = I.getParent()->getParent()->getParent(); 4977 auto *CI = cast<CallInst>(&I); 4978 4979 SmallVector<Type *, 4> Tys; 4980 for (Value *ArgOperand : CI->arg_operands()) 4981 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4982 4983 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4984 4985 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4986 // version of the instruction. 4987 // Is it beneficial to perform intrinsic call compared to lib call? 4988 bool NeedToScalarize = false; 4989 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4990 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4991 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4992 assert((UseVectorIntrinsic || !NeedToScalarize) && 4993 "Instruction should be scalarized elsewhere."); 4994 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4995 "Either the intrinsic cost or vector call cost must be valid"); 4996 4997 for (unsigned Part = 0; Part < UF; ++Part) { 4998 SmallVector<Value *, 4> Args; 4999 for (auto &I : enumerate(ArgOperands.operands())) { 5000 // Some intrinsics have a scalar argument - don't replace it with a 5001 // vector. 
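      // For example, the second operand of llvm.ctlz is an i1 flag that must
      // stay scalar even when the first operand is widened.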
5002 Value *Arg; 5003 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5004 Arg = State.get(I.value(), Part); 5005 else 5006 Arg = State.get(I.value(), VPIteration(0, 0)); 5007 Args.push_back(Arg); 5008 } 5009 5010 Function *VectorF; 5011 if (UseVectorIntrinsic) { 5012 // Use vector version of the intrinsic. 5013 Type *TysForDecl[] = {CI->getType()}; 5014 if (VF.isVector()) 5015 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5016 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5017 assert(VectorF && "Can't retrieve vector intrinsic."); 5018 } else { 5019 // Use vector version of the function call. 5020 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5021 #ifndef NDEBUG 5022 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5023 "Can't create vector function."); 5024 #endif 5025 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5026 } 5027 SmallVector<OperandBundleDef, 1> OpBundles; 5028 CI->getOperandBundlesAsDefs(OpBundles); 5029 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5030 5031 if (isa<FPMathOperator>(V)) 5032 V->copyFastMathFlags(CI); 5033 5034 State.set(Def, V, Part); 5035 addMetadata(V, &I); 5036 } 5037 } 5038 5039 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5040 VPUser &Operands, 5041 bool InvariantCond, 5042 VPTransformState &State) { 5043 setDebugLocFromInst(Builder, &I); 5044 5045 // The condition can be loop invariant but still defined inside the 5046 // loop. This means that we can't just use the original 'cond' value. 5047 // We have to take the 'vectorized' value and pick the first lane. 5048 // Instcombine will make this a no-op. 5049 auto *InvarCond = InvariantCond 5050 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5051 : nullptr; 5052 5053 for (unsigned Part = 0; Part < UF; ++Part) { 5054 Value *Cond = 5055 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5056 Value *Op0 = State.get(Operands.getOperand(1), Part); 5057 Value *Op1 = State.get(Operands.getOperand(2), Part); 5058 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5059 State.set(VPDef, Sel, Part); 5060 addMetadata(Sel, &I); 5061 } 5062 } 5063 5064 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5065 // We should not collect Scalars more than once per VF. Right now, this 5066 // function is called from collectUniformsAndScalars(), which already does 5067 // this check. Collecting Scalars for VF=1 does not make any sense. 5068 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5069 "This function should not be visited twice for the same VF"); 5070 5071 SmallSetVector<Instruction *, 8> Worklist; 5072 5073 // These sets are used to seed the analysis with pointers used by memory 5074 // accesses that will remain scalar. 5075 SmallSetVector<Instruction *, 8> ScalarPtrs; 5076 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5077 auto *Latch = TheLoop->getLoopLatch(); 5078 5079 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5080 // The pointer operands of loads and stores will be scalar as long as the 5081 // memory access is not a gather or scatter operation. The value operand of a 5082 // store will remain scalar if the store is scalarized. 
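  // For example, the address of a consecutive (CM_Widen) access is computed
  // with a scalar GEP per unroll part, so its use here is a scalar use,
  // whereas a gather or scatter (CM_GatherScatter) consumes a full vector of
  // pointers.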
5083 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5084 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5085 assert(WideningDecision != CM_Unknown && 5086 "Widening decision should be ready at this moment"); 5087 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5088 if (Ptr == Store->getValueOperand()) 5089 return WideningDecision == CM_Scalarize; 5090 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5091 "Ptr is neither a value or pointer operand"); 5092 return WideningDecision != CM_GatherScatter; 5093 }; 5094 5095 // A helper that returns true if the given value is a bitcast or 5096 // getelementptr instruction contained in the loop. 5097 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5098 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5099 isa<GetElementPtrInst>(V)) && 5100 !TheLoop->isLoopInvariant(V); 5101 }; 5102 5103 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5104 if (!isa<PHINode>(Ptr) || 5105 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5106 return false; 5107 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5108 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5109 return false; 5110 return isScalarUse(MemAccess, Ptr); 5111 }; 5112 5113 // A helper that evaluates a memory access's use of a pointer. If the 5114 // pointer is actually the pointer induction of a loop, it is being 5115 // inserted into Worklist. If the use will be a scalar use, and the 5116 // pointer is only used by memory accesses, we place the pointer in 5117 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5118 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5119 if (isScalarPtrInduction(MemAccess, Ptr)) { 5120 Worklist.insert(cast<Instruction>(Ptr)); 5121 Instruction *Update = cast<Instruction>( 5122 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5123 Worklist.insert(Update); 5124 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5125 << "\n"); 5126 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update 5127 << "\n"); 5128 return; 5129 } 5130 // We only care about bitcast and getelementptr instructions contained in 5131 // the loop. 5132 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5133 return; 5134 5135 // If the pointer has already been identified as scalar (e.g., if it was 5136 // also identified as uniform), there's nothing to do. 5137 auto *I = cast<Instruction>(Ptr); 5138 if (Worklist.count(I)) 5139 return; 5140 5141 // If the use of the pointer will be a scalar use, and all users of the 5142 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5143 // place the pointer in PossibleNonScalarPtrs. 5144 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5145 return isa<LoadInst>(U) || isa<StoreInst>(U); 5146 })) 5147 ScalarPtrs.insert(I); 5148 else 5149 PossibleNonScalarPtrs.insert(I); 5150 }; 5151 5152 // We seed the scalars analysis with three classes of instructions: (1) 5153 // instructions marked uniform-after-vectorization and (2) bitcast, 5154 // getelementptr and (pointer) phi instructions used by memory accesses 5155 // requiring a scalar use. 5156 // 5157 // (1) Add to the worklist all instructions that have been identified as 5158 // uniform-after-vectorization. 
5159 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5160 5161 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5162 // memory accesses requiring a scalar use. The pointer operands of loads and 5163 // stores will be scalar as long as the memory accesses is not a gather or 5164 // scatter operation. The value operand of a store will remain scalar if the 5165 // store is scalarized. 5166 for (auto *BB : TheLoop->blocks()) 5167 for (auto &I : *BB) { 5168 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5169 evaluatePtrUse(Load, Load->getPointerOperand()); 5170 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5171 evaluatePtrUse(Store, Store->getPointerOperand()); 5172 evaluatePtrUse(Store, Store->getValueOperand()); 5173 } 5174 } 5175 for (auto *I : ScalarPtrs) 5176 if (!PossibleNonScalarPtrs.count(I)) { 5177 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5178 Worklist.insert(I); 5179 } 5180 5181 // Insert the forced scalars. 5182 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5183 // induction variable when the PHI user is scalarized. 5184 auto ForcedScalar = ForcedScalars.find(VF); 5185 if (ForcedScalar != ForcedScalars.end()) 5186 for (auto *I : ForcedScalar->second) 5187 Worklist.insert(I); 5188 5189 // Expand the worklist by looking through any bitcasts and getelementptr 5190 // instructions we've already identified as scalar. This is similar to the 5191 // expansion step in collectLoopUniforms(); however, here we're only 5192 // expanding to include additional bitcasts and getelementptr instructions. 5193 unsigned Idx = 0; 5194 while (Idx != Worklist.size()) { 5195 Instruction *Dst = Worklist[Idx++]; 5196 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5197 continue; 5198 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5199 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5200 auto *J = cast<Instruction>(U); 5201 return !TheLoop->contains(J) || Worklist.count(J) || 5202 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5203 isScalarUse(J, Src)); 5204 })) { 5205 Worklist.insert(Src); 5206 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5207 } 5208 } 5209 5210 // An induction variable will remain scalar if all users of the induction 5211 // variable and induction variable update remain scalar. 5212 for (auto &Induction : Legal->getInductionVars()) { 5213 auto *Ind = Induction.first; 5214 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5215 5216 // If tail-folding is applied, the primary induction variable will be used 5217 // to feed a vector compare. 5218 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5219 continue; 5220 5221 // Determine if all users of the induction variable are scalar after 5222 // vectorization. 5223 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5224 auto *I = cast<Instruction>(U); 5225 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5226 }); 5227 if (!ScalarInd) 5228 continue; 5229 5230 // Determine if all users of the induction variable update instruction are 5231 // scalar after vectorization. 5232 auto ScalarIndUpdate = 5233 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5234 auto *I = cast<Instruction>(U); 5235 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5236 }); 5237 if (!ScalarIndUpdate) 5238 continue; 5239 5240 // The induction variable and its update instruction will remain scalar. 
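    // For example, if 'i' and 'i.next = i + 1' are used only by scalarized
    // address computations (and by each other), both are kept as scalar
    // values and no widened induction is needed for them.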
5241 Worklist.insert(Ind); 5242 Worklist.insert(IndUpdate); 5243 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5244 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5245 << "\n"); 5246 } 5247 5248 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5249 } 5250 5251 bool LoopVectorizationCostModel::isScalarWithPredication( 5252 Instruction *I, ElementCount VF) const { 5253 if (!blockNeedsPredication(I->getParent())) 5254 return false; 5255 switch(I->getOpcode()) { 5256 default: 5257 break; 5258 case Instruction::Load: 5259 case Instruction::Store: { 5260 if (!Legal->isMaskRequired(I)) 5261 return false; 5262 auto *Ptr = getLoadStorePointerOperand(I); 5263 auto *Ty = getMemInstValueType(I); 5264 // We have already decided how to vectorize this instruction, get that 5265 // result. 5266 if (VF.isVector()) { 5267 InstWidening WideningDecision = getWideningDecision(I, VF); 5268 assert(WideningDecision != CM_Unknown && 5269 "Widening decision should be ready at this moment"); 5270 return WideningDecision == CM_Scalarize; 5271 } 5272 const Align Alignment = getLoadStoreAlignment(I); 5273 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5274 isLegalMaskedGather(Ty, Alignment)) 5275 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5276 isLegalMaskedScatter(Ty, Alignment)); 5277 } 5278 case Instruction::UDiv: 5279 case Instruction::SDiv: 5280 case Instruction::SRem: 5281 case Instruction::URem: 5282 return mayDivideByZero(*I); 5283 } 5284 return false; 5285 } 5286 5287 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5288 Instruction *I, ElementCount VF) { 5289 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5290 assert(getWideningDecision(I, VF) == CM_Unknown && 5291 "Decision should not be set yet."); 5292 auto *Group = getInterleavedAccessGroup(I); 5293 assert(Group && "Must have a group."); 5294 5295 // If the instruction's allocated size doesn't equal it's type size, it 5296 // requires padding and will be scalarized. 5297 auto &DL = I->getModule()->getDataLayout(); 5298 auto *ScalarTy = getMemInstValueType(I); 5299 if (hasIrregularType(ScalarTy, DL)) 5300 return false; 5301 5302 // Check if masking is required. 5303 // A Group may need masking for one of two reasons: it resides in a block that 5304 // needs predication, or it was decided to use masking to deal with gaps. 5305 bool PredicatedAccessRequiresMasking = 5306 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5307 bool AccessWithGapsRequiresMasking = 5308 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5309 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 5310 return true; 5311 5312 // If masked interleaving is required, we expect that the user/target had 5313 // enabled it, because otherwise it either wouldn't have been created or 5314 // it should have been invalidated by the CostModel. 5315 assert(useMaskedInterleavedAccesses(TTI) && 5316 "Masked interleave-groups for predicated accesses are not enabled."); 5317 5318 auto *Ty = getMemInstValueType(I); 5319 const Align Alignment = getLoadStoreAlignment(I); 5320 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5321 : TTI.isLegalMaskedStore(Ty, Alignment); 5322 } 5323 5324 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5325 Instruction *I, ElementCount VF) { 5326 // Get and ensure we have a valid memory instruction. 
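  // For example, a unit-stride access like 'a[i]' can be widened into one wide
  // load or store per unroll part, whereas a strided access like 'a[2*i]' is
  // not consecutive and is handled by gather/scatter or scalarization instead.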
5327 LoadInst *LI = dyn_cast<LoadInst>(I); 5328 StoreInst *SI = dyn_cast<StoreInst>(I); 5329 assert((LI || SI) && "Invalid memory instruction"); 5330 5331 auto *Ptr = getLoadStorePointerOperand(I); 5332 5333 // In order to be widened, the pointer should be consecutive, first of all. 5334 if (!Legal->isConsecutivePtr(Ptr)) 5335 return false; 5336 5337 // If the instruction is a store located in a predicated block, it will be 5338 // scalarized. 5339 if (isScalarWithPredication(I)) 5340 return false; 5341 5342 // If the instruction's allocated size doesn't equal it's type size, it 5343 // requires padding and will be scalarized. 5344 auto &DL = I->getModule()->getDataLayout(); 5345 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5346 if (hasIrregularType(ScalarTy, DL)) 5347 return false; 5348 5349 return true; 5350 } 5351 5352 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5353 // We should not collect Uniforms more than once per VF. Right now, 5354 // this function is called from collectUniformsAndScalars(), which 5355 // already does this check. Collecting Uniforms for VF=1 does not make any 5356 // sense. 5357 5358 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5359 "This function should not be visited twice for the same VF"); 5360 5361 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5362 // not analyze again. Uniforms.count(VF) will return 1. 5363 Uniforms[VF].clear(); 5364 5365 // We now know that the loop is vectorizable! 5366 // Collect instructions inside the loop that will remain uniform after 5367 // vectorization. 5368 5369 // Global values, params and instructions outside of current loop are out of 5370 // scope. 5371 auto isOutOfScope = [&](Value *V) -> bool { 5372 Instruction *I = dyn_cast<Instruction>(V); 5373 return (!I || !TheLoop->contains(I)); 5374 }; 5375 5376 SetVector<Instruction *> Worklist; 5377 BasicBlock *Latch = TheLoop->getLoopLatch(); 5378 5379 // Instructions that are scalar with predication must not be considered 5380 // uniform after vectorization, because that would create an erroneous 5381 // replicating region where only a single instance out of VF should be formed. 5382 // TODO: optimize such seldom cases if found important, see PR40816. 5383 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5384 if (isOutOfScope(I)) { 5385 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5386 << *I << "\n"); 5387 return; 5388 } 5389 if (isScalarWithPredication(I, VF)) { 5390 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5391 << *I << "\n"); 5392 return; 5393 } 5394 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5395 Worklist.insert(I); 5396 }; 5397 5398 // Start with the conditional branch. If the branch condition is an 5399 // instruction contained in the loop that is only used by the branch, it is 5400 // uniform. 5401 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5402 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5403 addToWorklistIfAllowed(Cmp); 5404 5405 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5406 InstWidening WideningDecision = getWideningDecision(I, VF); 5407 assert(WideningDecision != CM_Unknown && 5408 "Widening decision should be ready at this moment"); 5409 5410 // A uniform memory op is itself uniform. We exclude uniform stores 5411 // here as they demand the last lane, not the first one. 
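    // For example, a load from a loop-invariant address yields the same value
    // in every iteration and can stay uniform, but a store to an invariant
    // address must keep the value of the last iteration, so it is excluded
    // here.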
5412 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5413 assert(WideningDecision == CM_Scalarize); 5414 return true; 5415 } 5416 5417 return (WideningDecision == CM_Widen || 5418 WideningDecision == CM_Widen_Reverse || 5419 WideningDecision == CM_Interleave); 5420 }; 5421 5422 5423 // Returns true if Ptr is the pointer operand of a memory access instruction 5424 // I, and I is known to not require scalarization. 5425 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5426 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5427 }; 5428 5429 // Holds a list of values which are known to have at least one uniform use. 5430 // Note that there may be other uses which aren't uniform. A "uniform use" 5431 // here is something which only demands lane 0 of the unrolled iterations; 5432 // it does not imply that all lanes produce the same value (e.g. this is not 5433 // the usual meaning of uniform) 5434 SetVector<Value *> HasUniformUse; 5435 5436 // Scan the loop for instructions which are either a) known to have only 5437 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5438 for (auto *BB : TheLoop->blocks()) 5439 for (auto &I : *BB) { 5440 // If there's no pointer operand, there's nothing to do. 5441 auto *Ptr = getLoadStorePointerOperand(&I); 5442 if (!Ptr) 5443 continue; 5444 5445 // A uniform memory op is itself uniform. We exclude uniform stores 5446 // here as they demand the last lane, not the first one. 5447 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5448 addToWorklistIfAllowed(&I); 5449 5450 if (isUniformDecision(&I, VF)) { 5451 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5452 HasUniformUse.insert(Ptr); 5453 } 5454 } 5455 5456 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5457 // demanding) users. Since loops are assumed to be in LCSSA form, this 5458 // disallows uses outside the loop as well. 5459 for (auto *V : HasUniformUse) { 5460 if (isOutOfScope(V)) 5461 continue; 5462 auto *I = cast<Instruction>(V); 5463 auto UsersAreMemAccesses = 5464 llvm::all_of(I->users(), [&](User *U) -> bool { 5465 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5466 }); 5467 if (UsersAreMemAccesses) 5468 addToWorklistIfAllowed(I); 5469 } 5470 5471 // Expand Worklist in topological order: whenever a new instruction 5472 // is added , its users should be already inside Worklist. It ensures 5473 // a uniform instruction will only be used by uniform instructions. 5474 unsigned idx = 0; 5475 while (idx != Worklist.size()) { 5476 Instruction *I = Worklist[idx++]; 5477 5478 for (auto OV : I->operand_values()) { 5479 // isOutOfScope operands cannot be uniform instructions. 5480 if (isOutOfScope(OV)) 5481 continue; 5482 // First order recurrence Phi's should typically be considered 5483 // non-uniform. 5484 auto *OP = dyn_cast<PHINode>(OV); 5485 if (OP && Legal->isFirstOrderRecurrence(OP)) 5486 continue; 5487 // If all the users of the operand are uniform, then add the 5488 // operand into the uniform worklist. 5489 auto *OI = cast<Instruction>(OV); 5490 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5491 auto *J = cast<Instruction>(U); 5492 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5493 })) 5494 addToWorklistIfAllowed(OI); 5495 } 5496 } 5497 5498 // For an instruction to be added into Worklist above, all its users inside 5499 // the loop should also be in Worklist. 
However, this condition cannot be 5500 // true for phi nodes that form a cyclic dependence. We must process phi 5501 // nodes separately. An induction variable will remain uniform if all users 5502 // of the induction variable and induction variable update remain uniform. 5503 // The code below handles both pointer and non-pointer induction variables. 5504 for (auto &Induction : Legal->getInductionVars()) { 5505 auto *Ind = Induction.first; 5506 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5507 5508 // Determine if all users of the induction variable are uniform after 5509 // vectorization. 5510 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5511 auto *I = cast<Instruction>(U); 5512 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5513 isVectorizedMemAccessUse(I, Ind); 5514 }); 5515 if (!UniformInd) 5516 continue; 5517 5518 // Determine if all users of the induction variable update instruction are 5519 // uniform after vectorization. 5520 auto UniformIndUpdate = 5521 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5522 auto *I = cast<Instruction>(U); 5523 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5524 isVectorizedMemAccessUse(I, IndUpdate); 5525 }); 5526 if (!UniformIndUpdate) 5527 continue; 5528 5529 // The induction variable and its update instruction will remain uniform. 5530 addToWorklistIfAllowed(Ind); 5531 addToWorklistIfAllowed(IndUpdate); 5532 } 5533 5534 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5535 } 5536 5537 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5538 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5539 5540 if (Legal->getRuntimePointerChecking()->Need) { 5541 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5542 "runtime pointer checks needed. Enable vectorization of this " 5543 "loop with '#pragma clang loop vectorize(enable)' when " 5544 "compiling with -Os/-Oz", 5545 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5546 return true; 5547 } 5548 5549 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5550 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5551 "runtime SCEV checks needed. Enable vectorization of this " 5552 "loop with '#pragma clang loop vectorize(enable)' when " 5553 "compiling with -Os/-Oz", 5554 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5555 return true; 5556 } 5557 5558 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5559 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5560 reportVectorizationFailure("Runtime stride check for small trip count", 5561 "runtime stride == 1 checks needed. Enable vectorization of " 5562 "this loop without such check by compiling with -Os/-Oz", 5563 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5564 return true; 5565 } 5566 5567 return false; 5568 } 5569 5570 Optional<ElementCount> 5571 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5572 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5573 // TODO: It may be useful to do this, since it's still likely to be dynamically 5574 // uniform if the target can skip. 5575 reportVectorizationFailure( 5576 "Not inserting runtime ptr check for divergent target", 5577 "runtime pointer checks needed. 
Not enabled for divergent target", 5578 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5579 return None; 5580 } 5581 5582 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5583 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5584 if (TC == 1) { 5585 reportVectorizationFailure("Single iteration (non) loop", 5586 "loop trip count is one, irrelevant for vectorization", 5587 "SingleIterationLoop", ORE, TheLoop); 5588 return None; 5589 } 5590 5591 switch (ScalarEpilogueStatus) { 5592 case CM_ScalarEpilogueAllowed: 5593 return computeFeasibleMaxVF(TC, UserVF); 5594 case CM_ScalarEpilogueNotAllowedUsePredicate: 5595 LLVM_FALLTHROUGH; 5596 case CM_ScalarEpilogueNotNeededUsePredicate: 5597 LLVM_DEBUG( 5598 dbgs() << "LV: vector predicate hint/switch found.\n" 5599 << "LV: Not allowing scalar epilogue, creating predicated " 5600 << "vector loop.\n"); 5601 break; 5602 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5603 // fallthrough as a special case of OptForSize 5604 case CM_ScalarEpilogueNotAllowedOptSize: 5605 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5606 LLVM_DEBUG( 5607 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5608 else 5609 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5610 << "count.\n"); 5611 5612 // Bail if runtime checks are required, which are not good when optimising 5613 // for size. 5614 if (runtimeChecksRequired()) 5615 return None; 5616 5617 break; 5618 } 5619 5620 // The only loops we can vectorize without a scalar epilogue are loops with 5621 // a bottom-test and a single exiting block. We'd have to handle the fact 5622 // that not every instruction executes on the last iteration. This will 5623 // require a lane mask which varies through the vector loop body. (TODO) 5624 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5625 // If there was a tail-folding hint/switch, but we can't fold the tail by 5626 // masking, fallback to a vectorization with a scalar epilogue. 5627 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5628 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5629 "scalar epilogue instead.\n"); 5630 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5631 return computeFeasibleMaxVF(TC, UserVF); 5632 } 5633 return None; 5634 } 5635 5636 // Now try the tail folding. 5637 5638 // Invalidate interleave groups that require an epilogue if we can't mask 5639 // the interleave-group. 5640 if (!useMaskedInterleavedAccesses(TTI)) { 5641 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5642 "No decisions should have been taken at this point"); 5643 // Note: There is no need to invalidate any cost modeling decisions here, as 5644 // none were taken so far. 5645 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5646 } 5647 5648 ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF); 5649 assert(!MaxVF.isScalable() && 5650 "Scalable vectors do not yet support tail folding"); 5651 assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) && 5652 "MaxVF must be a power of 2"); 5653 unsigned MaxVFtimesIC = 5654 UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue(); 5655 // Avoid tail folding if the trip count is known to be a multiple of any VF we 5656 // chose.
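  // A small worked example of the check below, with illustrative numbers only
  // (not taken from any particular target or loop): if the backedge-taken
  // count is known to be 1023, the exit count computed below is 1024; with
  // MaxVF = 8 and a user interleave count of 2, MaxVFtimesIC = 16 and
  // 1024 urem 16 == 0, so no tail remains and MaxVF is accepted without
  // folding.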
5657 ScalarEvolution *SE = PSE.getSE(); 5658 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5659 const SCEV *ExitCount = SE->getAddExpr( 5660 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5661 const SCEV *Rem = SE->getURemExpr( 5662 SE->applyLoopGuards(ExitCount, TheLoop), 5663 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5664 if (Rem->isZero()) { 5665 // Accept MaxVF if we do not have a tail. 5666 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5667 return MaxVF; 5668 } 5669 5670 // If we don't know the precise trip count, or if the trip count that we 5671 // found modulo the vectorization factor is not zero, try to fold the tail 5672 // by masking. 5673 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5674 if (Legal->prepareToFoldTailByMasking()) { 5675 FoldTailByMasking = true; 5676 return MaxVF; 5677 } 5678 5679 // If there was a tail-folding hint/switch, but we can't fold the tail by 5680 // masking, fallback to a vectorization with a scalar epilogue. 5681 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5682 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5683 "scalar epilogue instead.\n"); 5684 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5685 return MaxVF; 5686 } 5687 5688 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5689 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5690 return None; 5691 } 5692 5693 if (TC == 0) { 5694 reportVectorizationFailure( 5695 "Unable to calculate the loop count due to complex control flow", 5696 "unable to calculate the loop count due to complex control flow", 5697 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5698 return None; 5699 } 5700 5701 reportVectorizationFailure( 5702 "Cannot optimize for size and vectorize at the same time.", 5703 "cannot optimize for size and vectorize at the same time. " 5704 "Enable vectorization of this loop with '#pragma clang loop " 5705 "vectorize(enable)' when compiling with -Os/-Oz", 5706 "NoTailLoopWithOptForSize", ORE, TheLoop); 5707 return None; 5708 } 5709 5710 ElementCount 5711 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5712 ElementCount UserVF) { 5713 bool IgnoreScalableUserVF = UserVF.isScalable() && 5714 !TTI.supportsScalableVectors() && 5715 !ForceTargetSupportsScalableVectors; 5716 if (IgnoreScalableUserVF) { 5717 LLVM_DEBUG( 5718 dbgs() << "LV: Ignoring VF=" << UserVF 5719 << " because target does not support scalable vectors.\n"); 5720 ORE->emit([&]() { 5721 return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF", 5722 TheLoop->getStartLoc(), 5723 TheLoop->getHeader()) 5724 << "Ignoring VF=" << ore::NV("UserVF", UserVF) 5725 << " because target does not support scalable vectors."; 5726 }); 5727 } 5728 5729 // Beyond this point two scenarios are handled. If UserVF isn't specified 5730 // then a suitable VF is chosen. If UserVF is specified and there are 5731 // dependencies, check if it's legal. However, if a UserVF is specified and 5732 // there are no dependencies, then there's nothing to do. 5733 if (UserVF.isNonZero() && !IgnoreScalableUserVF) { 5734 if (!canVectorizeReductions(UserVF)) { 5735 reportVectorizationFailure( 5736 "LV: Scalable vectorization not supported for the reduction " 5737 "operations found in this loop. 
Using fixed-width " 5738 "vectorization instead.", 5739 "Scalable vectorization not supported for the reduction operations " 5740 "found in this loop. Using fixed-width vectorization instead.", 5741 "ScalableVFUnfeasible", ORE, TheLoop); 5742 return computeFeasibleMaxVF( 5743 ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue())); 5744 } 5745 5746 if (Legal->isSafeForAnyVectorWidth()) 5747 return UserVF; 5748 } 5749 5750 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5751 unsigned SmallestType, WidestType; 5752 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5753 unsigned WidestRegister = 5754 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 5755 .getFixedSize(); 5756 5757 // Get the maximum safe dependence distance in bits computed by LAA. 5758 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5759 // the memory access that is most restrictive (the one involved in the smallest 5760 // dependence distance). 5761 unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits(); 5762 5763 // If the user vectorization factor is legally unsafe, clamp it to a safe 5764 // value. Otherwise, return as is. 5765 if (UserVF.isNonZero() && !IgnoreScalableUserVF) { 5766 unsigned MaxSafeElements = 5767 PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType); 5768 ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements); 5769 5770 if (UserVF.isScalable()) { 5771 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5772 5773 // Scale VF by vscale before checking if it's safe. 5774 MaxSafeVF = ElementCount::getScalable( 5775 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5776 5777 if (MaxSafeVF.isZero()) { 5778 // The dependence distance is too small to use scalable vectors, 5779 // fallback on fixed. 5780 LLVM_DEBUG( 5781 dbgs() 5782 << "LV: Max legal vector width too small, scalable vectorization " 5783 "unfeasible. Using fixed-width vectorization instead.\n"); 5784 ORE->emit([&]() { 5785 return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible", 5786 TheLoop->getStartLoc(), 5787 TheLoop->getHeader()) 5788 << "Max legal vector width too small, scalable vectorization " 5789 << "unfeasible. Using fixed-width vectorization instead."; 5790 }); 5791 return computeFeasibleMaxVF( 5792 ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue())); 5793 } 5794 } 5795 5796 LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n"); 5797 5798 if (ElementCount::isKnownLE(UserVF, MaxSafeVF)) 5799 return UserVF; 5800 5801 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5802 << " is unsafe, clamping to max safe VF=" << MaxSafeVF 5803 << ".\n"); 5804 ORE->emit([&]() { 5805 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5806 TheLoop->getStartLoc(), 5807 TheLoop->getHeader()) 5808 << "User-specified vectorization factor " 5809 << ore::NV("UserVectorizationFactor", UserVF) 5810 << " is unsafe, clamping to maximum safe vectorization factor " 5811 << ore::NV("VectorizationFactor", MaxSafeVF); 5812 }); 5813 return MaxSafeVF; 5814 } 5815 5816 WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits); 5817 5818 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5819 // Note that both WidestRegister and WidestType may not be powers of 2.
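  // Worked example for the computation below, with illustrative numbers only:
  // with 256-bit vector registers and a maximum safe dependence width of 96
  // bits, WidestRegister is clamped to 96; for WidestType == 32 this gives
  // 96 / 32 == 3 lanes, and PowerOf2Floor(3) yields a fixed MaxVectorSize of
  // 2 elements.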
5820 auto MaxVectorSize = 5821 ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType)); 5822 5823 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5824 << " / " << WidestType << " bits.\n"); 5825 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5826 << WidestRegister << " bits.\n"); 5827 5828 assert(MaxVectorSize.getFixedValue() <= WidestRegister && 5829 "Did not expect to pack so many elements" 5830 " into one vector!"); 5831 if (MaxVectorSize.getFixedValue() == 0) { 5832 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5833 return ElementCount::getFixed(1); 5834 } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() && 5835 isPowerOf2_32(ConstTripCount)) { 5836 // We need to clamp the VF to be the ConstTripCount. There is no point in 5837 // choosing a higher viable VF as done in the loop below. 5838 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5839 << ConstTripCount << "\n"); 5840 return ElementCount::getFixed(ConstTripCount); 5841 } 5842 5843 ElementCount MaxVF = MaxVectorSize; 5844 if (TTI.shouldMaximizeVectorBandwidth() || 5845 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5846 // Collect all viable vectorization factors larger than the default MaxVF 5847 // (i.e. MaxVectorSize). 5848 SmallVector<ElementCount, 8> VFs; 5849 auto MaxVectorSizeMaxBW = 5850 ElementCount::getFixed(WidestRegister / SmallestType); 5851 for (ElementCount VS = MaxVectorSize * 2; 5852 ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2) 5853 VFs.push_back(VS); 5854 5855 // For each VF calculate its register usage. 5856 auto RUs = calculateRegisterUsage(VFs); 5857 5858 // Select the largest VF which doesn't require more registers than existing 5859 // ones. 5860 for (int i = RUs.size() - 1; i >= 0; --i) { 5861 bool Selected = true; 5862 for (auto &pair : RUs[i].MaxLocalUsers) { 5863 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5864 if (pair.second > TargetNumRegisters) 5865 Selected = false; 5866 } 5867 if (Selected) { 5868 MaxVF = VFs[i]; 5869 break; 5870 } 5871 } 5872 if (ElementCount MinVF = 5873 TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) { 5874 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5875 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5876 << ") with target's minimum: " << MinVF << '\n'); 5877 MaxVF = MinVF; 5878 } 5879 } 5880 } 5881 return MaxVF; 5882 } 5883 5884 bool LoopVectorizationCostModel::isMoreProfitable( 5885 const VectorizationFactor &A, const VectorizationFactor &B) const { 5886 InstructionCost::CostType CostA = *A.Cost.getValue(); 5887 InstructionCost::CostType CostB = *B.Cost.getValue(); 5888 5889 // To avoid the need for FP division: 5890 // (CostA / A.Width) < (CostB / B.Width) 5891 // <=> (CostA * B.Width) < (CostB * A.Width) 5892 return (CostA * B.Width.getKnownMinValue()) < 5893 (CostB * A.Width.getKnownMinValue()); 5894 } 5895 5896 VectorizationFactor 5897 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) { 5898 // FIXME: This can be fixed for scalable vectors later, because at this stage 5899 // the LoopVectorizer will only consider vectorizing a loop with scalable 5900 // vectors when the loop has a hint to enable vectorization for a given VF. 
5901 assert(!MaxVF.isScalable() && "scalable vectors not yet supported"); 5902 5903 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5904 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5905 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5906 5907 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5908 VectorizationFactor ChosenFactor = ScalarCost; 5909 5910 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5911 if (ForceVectorization && MaxVF.isVector()) { 5912 // Ignore scalar width, because the user explicitly wants vectorization. 5913 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5914 // evaluation. 5915 ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max(); 5916 } 5917 5918 for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF); 5919 i *= 2) { 5920 // Notice that the vector loop needs to be executed less times, so 5921 // we need to divide the cost of the vector loops by the width of 5922 // the vector elements. 5923 VectorizationCostTy C = expectedCost(i); 5924 5925 assert(C.first.isValid() && "Unexpected invalid cost for vector loop"); 5926 VectorizationFactor Candidate(i, C.first); 5927 LLVM_DEBUG( 5928 dbgs() << "LV: Vector loop of width " << i << " costs: " 5929 << (*Candidate.Cost.getValue() / Candidate.Width.getFixedValue()) 5930 << ".\n"); 5931 5932 if (!C.second && !ForceVectorization) { 5933 LLVM_DEBUG( 5934 dbgs() << "LV: Not considering vector loop of width " << i 5935 << " because it will not generate any vector instructions.\n"); 5936 continue; 5937 } 5938 5939 // If profitable add it to ProfitableVF list. 5940 if (isMoreProfitable(Candidate, ScalarCost)) 5941 ProfitableVFs.push_back(Candidate); 5942 5943 if (isMoreProfitable(Candidate, ChosenFactor)) 5944 ChosenFactor = Candidate; 5945 } 5946 5947 if (!EnableCondStoresVectorization && NumPredStores) { 5948 reportVectorizationFailure("There are conditional stores.", 5949 "store that is conditionally executed prevents vectorization", 5950 "ConditionalStore", ORE, TheLoop); 5951 ChosenFactor = ScalarCost; 5952 } 5953 5954 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5955 *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue()) 5956 dbgs() 5957 << "LV: Vectorization seems to be not beneficial, " 5958 << "but was forced by a user.\n"); 5959 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5960 return ChosenFactor; 5961 } 5962 5963 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5964 const Loop &L, ElementCount VF) const { 5965 // Cross iteration phis such as reductions need special handling and are 5966 // currently unsupported. 5967 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 5968 return Legal->isFirstOrderRecurrence(&Phi) || 5969 Legal->isReductionVariable(&Phi); 5970 })) 5971 return false; 5972 5973 // Phis with uses outside of the loop require special handling and are 5974 // currently unsupported. 5975 for (auto &Entry : Legal->getInductionVars()) { 5976 // Look for uses of the value of the induction at the last iteration. 5977 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5978 for (User *U : PostInc->users()) 5979 if (!L.contains(cast<Instruction>(U))) 5980 return false; 5981 // Look for uses of penultimate value of the induction. 
5982 for (User *U : Entry.first->users()) 5983 if (!L.contains(cast<Instruction>(U))) 5984 return false; 5985 } 5986 5987 // Induction variables that are widened require special handling that is 5988 // currently not supported. 5989 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5990 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5991 this->isProfitableToScalarize(Entry.first, VF)); 5992 })) 5993 return false; 5994 5995 return true; 5996 } 5997 5998 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5999 const ElementCount VF) const { 6000 // FIXME: We need a much better cost-model to take different parameters such 6001 // as register pressure, code size increase and cost of extra branches into 6002 // account. For now we apply a very crude heuristic and only consider loops 6003 // with vectorization factors larger than a certain value. 6004 // We also consider epilogue vectorization unprofitable for targets that don't 6005 // consider interleaving beneficial (eg. MVE). 6006 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 6007 return false; 6008 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 6009 return true; 6010 return false; 6011 } 6012 6013 VectorizationFactor 6014 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 6015 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 6016 VectorizationFactor Result = VectorizationFactor::Disabled(); 6017 if (!EnableEpilogueVectorization) { 6018 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 6019 return Result; 6020 } 6021 6022 if (!isScalarEpilogueAllowed()) { 6023 LLVM_DEBUG( 6024 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 6025 "allowed.\n";); 6026 return Result; 6027 } 6028 6029 // FIXME: This can be fixed for scalable vectors later, because at this stage 6030 // the LoopVectorizer will only consider vectorizing a loop with scalable 6031 // vectors when the loop has a hint to enable vectorization for a given VF. 6032 if (MainLoopVF.isScalable()) { 6033 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " 6034 "yet supported.\n"); 6035 return Result; 6036 } 6037 6038 // Not really a cost consideration, but check for unsupported cases here to 6039 // simplify the logic. 
6040 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6041 LLVM_DEBUG( 6042 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6043 "not a supported candidate.\n";); 6044 return Result; 6045 } 6046 6047 if (EpilogueVectorizationForceVF > 1) { 6048 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6049 if (LVP.hasPlanWithVFs( 6050 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 6051 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 6052 else { 6053 LLVM_DEBUG( 6054 dbgs() 6055 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6056 return Result; 6057 } 6058 } 6059 6060 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6061 TheLoop->getHeader()->getParent()->hasMinSize()) { 6062 LLVM_DEBUG( 6063 dbgs() 6064 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6065 return Result; 6066 } 6067 6068 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 6069 return Result; 6070 6071 for (auto &NextVF : ProfitableVFs) 6072 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 6073 (Result.Width.getFixedValue() == 1 || 6074 isMoreProfitable(NextVF, Result)) && 6075 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 6076 Result = NextVF; 6077 6078 if (Result != VectorizationFactor::Disabled()) 6079 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6080 << Result.Width.getFixedValue() << "\n";); 6081 return Result; 6082 } 6083 6084 std::pair<unsigned, unsigned> 6085 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6086 unsigned MinWidth = -1U; 6087 unsigned MaxWidth = 8; 6088 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6089 6090 // For each block. 6091 for (BasicBlock *BB : TheLoop->blocks()) { 6092 // For each instruction in the loop. 6093 for (Instruction &I : BB->instructionsWithoutDebug()) { 6094 Type *T = I.getType(); 6095 6096 // Skip ignored values. 6097 if (ValuesToIgnore.count(&I)) 6098 continue; 6099 6100 // Only examine Loads, Stores and PHINodes. 6101 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6102 continue; 6103 6104 // Examine PHI nodes that are reduction variables. Update the type to 6105 // account for the recurrence type. 6106 if (auto *PN = dyn_cast<PHINode>(&I)) { 6107 if (!Legal->isReductionVariable(PN)) 6108 continue; 6109 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN]; 6110 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6111 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6112 RdxDesc.getRecurrenceType(), 6113 TargetTransformInfo::ReductionFlags())) 6114 continue; 6115 T = RdxDesc.getRecurrenceType(); 6116 } 6117 6118 // Examine the stored values. 6119 if (auto *ST = dyn_cast<StoreInst>(&I)) 6120 T = ST->getValueOperand()->getType(); 6121 6122 // Ignore loaded pointer types and stored pointer types that are not 6123 // vectorizable. 6124 // 6125 // FIXME: The check here attempts to predict whether a load or store will 6126 // be vectorized. We only know this for certain after a VF has 6127 // been selected. Here, we assume that if an access can be 6128 // vectorized, it will be. We should also look at extending this 6129 // optimization to non-pointer types. 
6130 // 6131 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6132 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6133 continue; 6134 6135 MinWidth = std::min(MinWidth, 6136 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6137 MaxWidth = std::max(MaxWidth, 6138 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6139 } 6140 } 6141 6142 return {MinWidth, MaxWidth}; 6143 } 6144 6145 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6146 unsigned LoopCost) { 6147 // -- The interleave heuristics -- 6148 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6149 // There are many micro-architectural considerations that we can't predict 6150 // at this level. For example, frontend pressure (on decode or fetch) due to 6151 // code size, or the number and capabilities of the execution ports. 6152 // 6153 // We use the following heuristics to select the interleave count: 6154 // 1. If the code has reductions, then we interleave to break the cross 6155 // iteration dependency. 6156 // 2. If the loop is really small, then we interleave to reduce the loop 6157 // overhead. 6158 // 3. We don't interleave if we think that we will spill registers to memory 6159 // due to the increased register pressure. 6160 6161 if (!isScalarEpilogueAllowed()) 6162 return 1; 6163 6164 // We used the distance for the interleave count. 6165 if (Legal->getMaxSafeDepDistBytes() != -1U) 6166 return 1; 6167 6168 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6169 const bool HasReductions = !Legal->getReductionVars().empty(); 6170 // Do not interleave loops with a relatively small known or estimated trip 6171 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6172 // enabled, and the code has scalar reductions (HasReductions && VF == 1), 6173 // because with the above conditions interleaving can expose ILP and break 6174 // cross iteration dependences for reductions. 6175 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6176 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6177 return 1; 6178 6179 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6180 // We divide by these constants so assume that we have at least one 6181 // instruction that uses at least one register. 6182 for (auto& pair : R.MaxLocalUsers) { 6183 pair.second = std::max(pair.second, 1U); 6184 } 6185 6186 // We calculate the interleave count using the following formula. 6187 // Subtract the number of loop invariants from the number of available 6188 // registers. These registers are used by all of the interleaved instances. 6189 // Next, divide the remaining registers by the number of registers that is 6190 // required by the loop, in order to estimate how many parallel instances 6191 // fit without causing spills. All of this is rounded down if necessary to be 6192 // a power of two. We want a power of two interleave count to simplify any 6193 // addressing operations or alignment considerations. 6194 // We also want power of two interleave counts to ensure that the induction 6195 // variable of the vector loop wraps to zero, when tail is folded by masking; 6196 // this currently happens when OptForSize, in which case IC is set to 1 above.
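  // Worked example of the formula above, with illustrative numbers only: with
  // 32 registers in a class, 2 loop-invariant values and a maximum of 5 local
  // users, the default computation below gives
  // PowerOf2Floor((32 - 2) / 5) == PowerOf2Floor(6) == 4; with
  // EnableIndVarRegisterHeur the induction variable is discounted and
  // PowerOf2Floor((32 - 2 - 1) / (5 - 1)) == PowerOf2Floor(7) == 4 as well.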
6197 unsigned IC = UINT_MAX; 6198 6199 for (auto& pair : R.MaxLocalUsers) { 6200 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6201 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6202 << " registers of " 6203 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6204 if (VF.isScalar()) { 6205 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6206 TargetNumRegisters = ForceTargetNumScalarRegs; 6207 } else { 6208 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6209 TargetNumRegisters = ForceTargetNumVectorRegs; 6210 } 6211 unsigned MaxLocalUsers = pair.second; 6212 unsigned LoopInvariantRegs = 0; 6213 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6214 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6215 6216 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6217 // Don't count the induction variable as interleaved. 6218 if (EnableIndVarRegisterHeur) { 6219 TmpIC = 6220 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6221 std::max(1U, (MaxLocalUsers - 1))); 6222 } 6223 6224 IC = std::min(IC, TmpIC); 6225 } 6226 6227 // Clamp the interleave ranges to reasonable counts. 6228 unsigned MaxInterleaveCount = 6229 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6230 6231 // Check if the user has overridden the max. 6232 if (VF.isScalar()) { 6233 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6234 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6235 } else { 6236 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6237 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6238 } 6239 6240 // If trip count is known or estimated compile time constant, limit the 6241 // interleave count to be less than the trip count divided by VF, provided it 6242 // is at least 1. 6243 // 6244 // For scalable vectors we can't know if interleaving is beneficial. It may 6245 // not be beneficial for small loops if none of the lanes in the second vector 6246 // iterations is enabled. However, for larger loops, there is likely to be a 6247 // similar benefit as for fixed-width vectors. For now, we choose to leave 6248 // the InterleaveCount as if vscale is '1', although if some information about 6249 // the vector is known (e.g. min vector size), we can make a better decision. 6250 if (BestKnownTC) { 6251 MaxInterleaveCount = 6252 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6253 // Make sure MaxInterleaveCount is greater than 0. 6254 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6255 } 6256 6257 assert(MaxInterleaveCount > 0 && 6258 "Maximum interleave count must be greater than 0"); 6259 6260 // Clamp the calculated IC to be between the 1 and the max interleave count 6261 // that the target and trip count allows. 6262 if (IC > MaxInterleaveCount) 6263 IC = MaxInterleaveCount; 6264 else 6265 // Make sure IC is greater than 0. 6266 IC = std::max(1u, IC); 6267 6268 assert(IC > 0 && "Interleave count must be greater than 0."); 6269 6270 // If we did not calculate the cost for VF (because the user selected the VF) 6271 // then we calculate the cost of VF here. 6272 if (LoopCost == 0) { 6273 assert(expectedCost(VF).first.isValid() && "Expected a valid cost"); 6274 LoopCost = *expectedCost(VF).first.getValue(); 6275 } 6276 6277 assert(LoopCost && "Non-zero loop cost expected"); 6278 6279 // Interleave if we vectorized this loop and there is a reduction that could 6280 // benefit from interleaving. 
6281 if (VF.isVector() && HasReductions) { 6282 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6283 return IC; 6284 } 6285 6286 // Note that if we've already vectorized the loop we will have done the 6287 // runtime check and so interleaving won't require further checks. 6288 bool InterleavingRequiresRuntimePointerCheck = 6289 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6290 6291 // We want to interleave small loops in order to reduce the loop overhead and 6292 // potentially expose ILP opportunities. 6293 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6294 << "LV: IC is " << IC << '\n' 6295 << "LV: VF is " << VF << '\n'); 6296 const bool AggressivelyInterleaveReductions = 6297 TTI.enableAggressiveInterleaving(HasReductions); 6298 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6299 // We assume that the cost overhead is 1 and we use the cost model 6300 // to estimate the cost of the loop and interleave until the cost of the 6301 // loop overhead is about 5% of the cost of the loop. 6302 unsigned SmallIC = 6303 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6304 6305 // Interleave until store/load ports (estimated by max interleave count) are 6306 // saturated. 6307 unsigned NumStores = Legal->getNumStores(); 6308 unsigned NumLoads = Legal->getNumLoads(); 6309 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6310 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6311 6312 // If we have a scalar reduction (vector reductions are already dealt with 6313 // by this point), we can increase the critical path length if the loop 6314 // we're interleaving is inside another loop. Limit, by default to 2, so the 6315 // critical path only gets increased by one reduction operation. 6316 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6317 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6318 SmallIC = std::min(SmallIC, F); 6319 StoresIC = std::min(StoresIC, F); 6320 LoadsIC = std::min(LoadsIC, F); 6321 } 6322 6323 if (EnableLoadStoreRuntimeInterleave && 6324 std::max(StoresIC, LoadsIC) > SmallIC) { 6325 LLVM_DEBUG( 6326 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6327 return std::max(StoresIC, LoadsIC); 6328 } 6329 6330 // If there are scalar reductions and TTI has enabled aggressive 6331 // interleaving for reductions, we will interleave to expose ILP. 6332 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6333 AggressivelyInterleaveReductions) { 6334 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6335 // Interleave no less than SmallIC but not as aggressive as the normal IC 6336 // to satisfy the rare situation when resources are too limited. 6337 return std::max(IC / 2, SmallIC); 6338 } else { 6339 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6340 return SmallIC; 6341 } 6342 } 6343 6344 // Interleave if this is a large loop (small loops are already dealt with by 6345 // this point) that could benefit from interleaving. 6346 if (AggressivelyInterleaveReductions) { 6347 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6348 return IC; 6349 } 6350 6351 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6352 return 1; 6353 } 6354 6355 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6356 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6357 // This function calculates the register usage by measuring the highest number 6358 // of values that are alive at a single location. 
Obviously, this is a very 6359 // rough estimation. We scan the loop in a topological order and 6360 // assign a number to each instruction. We use RPO to ensure that defs are 6361 // met before their users. We assume that each instruction that has in-loop 6362 // users starts an interval. We record every time that an in-loop value is 6363 // used, so we have a list of the first and last occurrences of each 6364 // instruction. Next, we transpose this data structure into a multi map that 6365 // holds the list of intervals that *end* at a specific location. This multi 6366 // map allows us to perform a linear search. We scan the instructions linearly 6367 // and record each time that a new interval starts, by placing it in a set. 6368 // If we find this value in the multi-map then we remove it from the set. 6369 // The max register usage is the maximum size of the set. 6370 // We also search for instructions that are defined outside the loop, but are 6371 // used inside the loop. We need this number separately from the max-interval 6372 // usage number because when we unroll, loop-invariant values do not take 6373 // more registers. 6374 LoopBlocksDFS DFS(TheLoop); 6375 DFS.perform(LI); 6376 6377 RegisterUsage RU; 6378 6379 // Each 'key' in the map opens a new interval. The values 6380 // of the map are the index of the 'last seen' usage of the 6381 // instruction that is the key. 6382 using IntervalMap = DenseMap<Instruction *, unsigned>; 6383 6384 // Maps instruction to its index. 6385 SmallVector<Instruction *, 64> IdxToInstr; 6386 // Marks the end of each interval. 6387 IntervalMap EndPoint; 6388 // Saves the list of instruction indices that are used in the loop. 6389 SmallPtrSet<Instruction *, 8> Ends; 6390 // Saves the list of values that are used in the loop but are 6391 // defined outside the loop, such as arguments and constants. 6392 SmallPtrSet<Value *, 8> LoopInvariants; 6393 6394 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6395 for (Instruction &I : BB->instructionsWithoutDebug()) { 6396 IdxToInstr.push_back(&I); 6397 6398 // Save the end location of each USE. 6399 for (Value *U : I.operands()) { 6400 auto *Instr = dyn_cast<Instruction>(U); 6401 6402 // Ignore non-instruction values such as arguments, constants, etc. 6403 if (!Instr) 6404 continue; 6405 6406 // If this instruction is outside the loop then record it and continue. 6407 if (!TheLoop->contains(Instr)) { 6408 LoopInvariants.insert(Instr); 6409 continue; 6410 } 6411 6412 // Overwrite previous end points. 6413 EndPoint[Instr] = IdxToInstr.size(); 6414 Ends.insert(Instr); 6415 } 6416 } 6417 } 6418 6419 // Saves the list of intervals that end with the index in 'key'. 6420 using InstrList = SmallVector<Instruction *, 2>; 6421 DenseMap<unsigned, InstrList> TransposeEnds; 6422 6423 // Transpose the EndPoints to a list of values that end at each index. 6424 for (auto &Interval : EndPoint) 6425 TransposeEnds[Interval.second].push_back(Interval.first); 6426 6427 SmallPtrSet<Instruction *, 8> OpenIntervals; 6428 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6429 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6430 6431 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6432 6433 // A lambda that gets the register usage for the given type and VF.
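  // For example (illustrative and target-dependent): for Ty == i64 and
  // VF == 4, a target with 128-bit vector registers would typically report 2
  // registers for <4 x i64>. Token types and types that are not valid vector
  // element types are counted as 0 below.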
6434 const auto &TTICapture = TTI; 6435 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) { 6436 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6437 return 0U; 6438 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF)); 6439 }; 6440 6441 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6442 Instruction *I = IdxToInstr[i]; 6443 6444 // Remove all of the instructions that end at this location. 6445 InstrList &List = TransposeEnds[i]; 6446 for (Instruction *ToRemove : List) 6447 OpenIntervals.erase(ToRemove); 6448 6449 // Ignore instructions that are never used within the loop. 6450 if (!Ends.count(I)) 6451 continue; 6452 6453 // Skip ignored values. 6454 if (ValuesToIgnore.count(I)) 6455 continue; 6456 6457 // For each VF find the maximum usage of registers. 6458 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6459 // Count the number of live intervals. 6460 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6461 6462 if (VFs[j].isScalar()) { 6463 for (auto Inst : OpenIntervals) { 6464 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6465 if (RegUsage.find(ClassID) == RegUsage.end()) 6466 RegUsage[ClassID] = 1; 6467 else 6468 RegUsage[ClassID] += 1; 6469 } 6470 } else { 6471 collectUniformsAndScalars(VFs[j]); 6472 for (auto Inst : OpenIntervals) { 6473 // Skip ignored values for VF > 1. 6474 if (VecValuesToIgnore.count(Inst)) 6475 continue; 6476 if (isScalarAfterVectorization(Inst, VFs[j])) { 6477 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6478 if (RegUsage.find(ClassID) == RegUsage.end()) 6479 RegUsage[ClassID] = 1; 6480 else 6481 RegUsage[ClassID] += 1; 6482 } else { 6483 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6484 if (RegUsage.find(ClassID) == RegUsage.end()) 6485 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6486 else 6487 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6488 } 6489 } 6490 } 6491 6492 for (auto& pair : RegUsage) { 6493 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6494 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6495 else 6496 MaxUsages[j][pair.first] = pair.second; 6497 } 6498 } 6499 6500 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6501 << OpenIntervals.size() << '\n'); 6502 6503 // Add the current instruction to the list of open intervals. 6504 OpenIntervals.insert(I); 6505 } 6506 6507 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6508 SmallMapVector<unsigned, unsigned, 4> Invariant; 6509 6510 for (auto Inst : LoopInvariants) { 6511 unsigned Usage = 6512 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6513 unsigned ClassID = 6514 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6515 if (Invariant.find(ClassID) == Invariant.end()) 6516 Invariant[ClassID] = Usage; 6517 else 6518 Invariant[ClassID] += Usage; 6519 } 6520 6521 LLVM_DEBUG({ 6522 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6523 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6524 << " item\n"; 6525 for (const auto &pair : MaxUsages[i]) { 6526 dbgs() << "LV(REG): RegisterClass: " 6527 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6528 << " registers\n"; 6529 } 6530 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6531 << " item\n"; 6532 for (const auto &pair : Invariant) { 6533 dbgs() << "LV(REG): RegisterClass: " 6534 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6535 << " registers\n"; 6536 } 6537 }); 6538 6539 RU.LoopInvariantRegs = Invariant; 6540 RU.MaxLocalUsers = MaxUsages[i]; 6541 RUs[i] = RU; 6542 } 6543 6544 return RUs; 6545 } 6546 6547 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6548 // TODO: Cost model for emulated masked load/store is completely 6549 // broken. This hack guides the cost model to use an artificially 6550 // high enough value to practically disable vectorization with such 6551 // operations, except where previously deployed legality hack allowed 6552 // using very low cost values. This is to avoid regressions coming simply 6553 // from moving "masked load/store" check from legality to cost model. 6554 // Masked Load/Gather emulation was previously never allowed. 6555 // Limited number of Masked Store/Scatter emulation was allowed. 6556 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 6557 return isa<LoadInst>(I) || 6558 (isa<StoreInst>(I) && 6559 NumPredStores > NumberOfStoresToPredicate); 6560 } 6561 6562 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6563 // If we aren't vectorizing the loop, or if we've already collected the 6564 // instructions to scalarize, there's nothing to do. Collection may already 6565 // have occurred if we have a user-selected VF and are now computing the 6566 // expected cost for interleaving. 6567 if (VF.isScalar() || VF.isZero() || 6568 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6569 return; 6570 6571 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6572 // not profitable to scalarize any instructions, the presence of VF in the 6573 // map will indicate that we've analyzed it already. 6574 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6575 6576 // Find all the instructions that are scalar with predication in the loop and 6577 // determine if it would be better to not if-convert the blocks they are in. 6578 // If so, we also record the instructions to scalarize. 6579 for (BasicBlock *BB : TheLoop->blocks()) { 6580 if (!blockNeedsPredication(BB)) 6581 continue; 6582 for (Instruction &I : *BB) 6583 if (isScalarWithPredication(&I)) { 6584 ScalarCostsTy ScalarCosts; 6585 // Do not apply discount logic if hacked cost is needed 6586 // for emulated masked memrefs. 6587 if (!useEmulatedMaskMemRefHack(&I) && 6588 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6589 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6590 // Remember that BB will remain after vectorization. 
6591 PredicatedBBsAfterVectorization.insert(BB); 6592 } 6593 } 6594 } 6595 6596 int LoopVectorizationCostModel::computePredInstDiscount( 6597 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6598 assert(!isUniformAfterVectorization(PredInst, VF) && 6599 "Instruction marked uniform-after-vectorization will be predicated"); 6600 6601 // Initialize the discount to zero, meaning that the scalar version and the 6602 // vector version cost the same. 6603 InstructionCost Discount = 0; 6604 6605 // Holds instructions to analyze. The instructions we visit are mapped in 6606 // ScalarCosts. Those instructions are the ones that would be scalarized if 6607 // we find that the scalar version costs less. 6608 SmallVector<Instruction *, 8> Worklist; 6609 6610 // Returns true if the given instruction can be scalarized. 6611 auto canBeScalarized = [&](Instruction *I) -> bool { 6612 // We only attempt to scalarize instructions forming a single-use chain 6613 // from the original predicated block that would otherwise be vectorized. 6614 // Although not strictly necessary, we give up on instructions we know will 6615 // already be scalar to avoid traversing chains that are unlikely to be 6616 // beneficial. 6617 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6618 isScalarAfterVectorization(I, VF)) 6619 return false; 6620 6621 // If the instruction is scalar with predication, it will be analyzed 6622 // separately. We ignore it within the context of PredInst. 6623 if (isScalarWithPredication(I)) 6624 return false; 6625 6626 // If any of the instruction's operands are uniform after vectorization, 6627 // the instruction cannot be scalarized. This prevents, for example, a 6628 // masked load from being scalarized. 6629 // 6630 // We assume we will only emit a value for lane zero of an instruction 6631 // marked uniform after vectorization, rather than VF identical values. 6632 // Thus, if we scalarize an instruction that uses a uniform, we would 6633 // create uses of values corresponding to the lanes we aren't emitting code 6634 // for. This behavior can be changed by allowing getScalarValue to clone 6635 // the lane zero values for uniforms rather than asserting. 6636 for (Use &U : I->operands()) 6637 if (auto *J = dyn_cast<Instruction>(U.get())) 6638 if (isUniformAfterVectorization(J, VF)) 6639 return false; 6640 6641 // Otherwise, we can scalarize the instruction. 6642 return true; 6643 }; 6644 6645 // Compute the expected cost discount from scalarizing the entire expression 6646 // feeding the predicated instruction. We currently only consider expressions 6647 // that are single-use instruction chains. 6648 Worklist.push_back(PredInst); 6649 while (!Worklist.empty()) { 6650 Instruction *I = Worklist.pop_back_val(); 6651 6652 // If we've already analyzed the instruction, there's nothing to do. 6653 if (ScalarCosts.find(I) != ScalarCosts.end()) 6654 continue; 6655 6656 // Compute the cost of the vector instruction. Note that this cost already 6657 // includes the scalarization overhead of the predicated instruction. 6658 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6659 6660 // Compute the cost of the scalarized instruction. This cost is the cost of 6661 // the instruction as if it wasn't if-converted and instead remained in the 6662 // predicated block. We will scale this cost by block probability after 6663 // computing the scalarization overhead. 
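  // Rough illustration of the discount computation below, with made-up costs:
  // for VF == 4, a vector cost of 12 and a per-lane scalar cost of 4, the
  // unscaled scalar cost is 16 (before adding any insert/extract overhead);
  // dividing by the reciprocal block probability (assumed here to be 2, i.e.
  // a 50% execution estimate) gives 8, so the discount grows by 12 - 8 == 4
  // and scalarizing this chain looks beneficial.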
6664 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6665 InstructionCost ScalarCost = 6666 VF.getKnownMinValue() * 6667 getInstructionCost(I, ElementCount::getFixed(1)).first; 6668 6669 // Compute the scalarization overhead of needed insertelement instructions 6670 // and phi nodes. 6671 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6672 ScalarCost += TTI.getScalarizationOverhead( 6673 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6674 APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); 6675 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6676 ScalarCost += 6677 VF.getKnownMinValue() * 6678 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6679 } 6680 6681 // Compute the scalarization overhead of needed extractelement 6682 // instructions. For each of the instruction's operands, if the operand can 6683 // be scalarized, add it to the worklist; otherwise, account for the 6684 // overhead. 6685 for (Use &U : I->operands()) 6686 if (auto *J = dyn_cast<Instruction>(U.get())) { 6687 assert(VectorType::isValidElementType(J->getType()) && 6688 "Instruction has non-scalar type"); 6689 if (canBeScalarized(J)) 6690 Worklist.push_back(J); 6691 else if (needsExtract(J, VF)) { 6692 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6693 ScalarCost += TTI.getScalarizationOverhead( 6694 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6695 APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); 6696 } 6697 } 6698 6699 // Scale the total scalar cost by block probability. 6700 ScalarCost /= getReciprocalPredBlockProb(); 6701 6702 // Compute the discount. A non-negative discount means the vector version 6703 // of the instruction costs more, and scalarizing would be beneficial. 6704 Discount += VectorCost - ScalarCost; 6705 ScalarCosts[I] = ScalarCost; 6706 } 6707 6708 return *Discount.getValue(); 6709 } 6710 6711 LoopVectorizationCostModel::VectorizationCostTy 6712 LoopVectorizationCostModel::expectedCost(ElementCount VF) { 6713 VectorizationCostTy Cost; 6714 6715 // For each block. 6716 for (BasicBlock *BB : TheLoop->blocks()) { 6717 VectorizationCostTy BlockCost; 6718 6719 // For each instruction in the old loop. 6720 for (Instruction &I : BB->instructionsWithoutDebug()) { 6721 // Skip ignored values. 6722 if (ValuesToIgnore.count(&I) || 6723 (VF.isVector() && VecValuesToIgnore.count(&I))) 6724 continue; 6725 6726 VectorizationCostTy C = getInstructionCost(&I, VF); 6727 6728 // Check if we should override the cost. 6729 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6730 C.first = InstructionCost(ForceTargetInstructionCost); 6731 6732 BlockCost.first += C.first; 6733 BlockCost.second |= C.second; 6734 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6735 << " for VF " << VF << " For instruction: " << I 6736 << '\n'); 6737 } 6738 6739 // If we are vectorizing a predicated block, it will have been 6740 // if-converted. This means that the block's instructions (aside from 6741 // stores and instructions that may divide by zero) will now be 6742 // unconditionally executed. For the scalar case, we may not always execute 6743 // the predicated block, if it is an if-else block. Thus, scale the block's 6744 // cost by the probability of executing it. blockNeedsPredication from 6745 // Legal is used so as to not include all blocks in tail folded loops. 
6746 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6747 BlockCost.first /= getReciprocalPredBlockProb(); 6748 6749 Cost.first += BlockCost.first; 6750 Cost.second |= BlockCost.second; 6751 } 6752 6753 return Cost; 6754 } 6755 6756 /// Gets Address Access SCEV after verifying that the access pattern 6757 /// is loop invariant except the induction variable dependence. 6758 /// 6759 /// This SCEV can be sent to the Target in order to estimate the address 6760 /// calculation cost. 6761 static const SCEV *getAddressAccessSCEV( 6762 Value *Ptr, 6763 LoopVectorizationLegality *Legal, 6764 PredicatedScalarEvolution &PSE, 6765 const Loop *TheLoop) { 6766 6767 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6768 if (!Gep) 6769 return nullptr; 6770 6771 // We are looking for a gep with all loop invariant indices except for one 6772 // which should be an induction variable. 6773 auto SE = PSE.getSE(); 6774 unsigned NumOperands = Gep->getNumOperands(); 6775 for (unsigned i = 1; i < NumOperands; ++i) { 6776 Value *Opd = Gep->getOperand(i); 6777 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6778 !Legal->isInductionVariable(Opd)) 6779 return nullptr; 6780 } 6781 6782 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6783 return PSE.getSCEV(Ptr); 6784 } 6785 6786 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6787 return Legal->hasStride(I->getOperand(0)) || 6788 Legal->hasStride(I->getOperand(1)); 6789 } 6790 6791 InstructionCost 6792 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6793 ElementCount VF) { 6794 assert(VF.isVector() && 6795 "Scalarization cost of instruction implies vectorization."); 6796 if (VF.isScalable()) 6797 return InstructionCost::getInvalid(); 6798 6799 Type *ValTy = getMemInstValueType(I); 6800 auto SE = PSE.getSE(); 6801 6802 unsigned AS = getLoadStoreAddressSpace(I); 6803 Value *Ptr = getLoadStorePointerOperand(I); 6804 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6805 6806 // Figure out whether the access is strided and get the stride value 6807 // if it's known in compile time 6808 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6809 6810 // Get the cost of the scalar memory instruction and address computation. 6811 InstructionCost Cost = 6812 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6813 6814 // Don't pass *I here, since it is scalar but will actually be part of a 6815 // vectorized loop where the user of it is a vectorized instruction. 6816 const Align Alignment = getLoadStoreAlignment(I); 6817 Cost += VF.getKnownMinValue() * 6818 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6819 AS, TTI::TCK_RecipThroughput); 6820 6821 // Get the overhead of the extractelement and insertelement instructions 6822 // we might create due to scalarization. 6823 Cost += getScalarizationOverhead(I, VF); 6824 6825 // If we have a predicated load/store, it will need extra i1 extracts and 6826 // conditional branches, but may not be executed for each vector lane. Scale 6827 // the cost by the probability of executing the predicated block. 
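  // Rough illustration with made-up costs: for VF == 4, if the scalarized
  // load/store plus address computation came to 20, halving it for an assumed
  // 50% block-execution probability gives 10, and the i1 extracts and branches
  // added below are then charged on top. When useEmulatedMaskMemRefHack()
  // fires, the whole figure is overridden with an artificially huge constant
  // instead.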
6828 if (isPredicatedInst(I)) { 6829 Cost /= getReciprocalPredBlockProb(); 6830 6831 // Add the cost of an i1 extract and a branch 6832 auto *Vec_i1Ty = 6833 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6834 Cost += TTI.getScalarizationOverhead( 6835 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 6836 /*Insert=*/false, /*Extract=*/true); 6837 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6838 6839 if (useEmulatedMaskMemRefHack(I)) 6840 // Artificially setting to a high enough value to practically disable 6841 // vectorization with such operations. 6842 Cost = 3000000; 6843 } 6844 6845 return Cost; 6846 } 6847 6848 InstructionCost 6849 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6850 ElementCount VF) { 6851 Type *ValTy = getMemInstValueType(I); 6852 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6853 Value *Ptr = getLoadStorePointerOperand(I); 6854 unsigned AS = getLoadStoreAddressSpace(I); 6855 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6856 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6857 6858 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6859 "Stride should be 1 or -1 for consecutive memory access"); 6860 const Align Alignment = getLoadStoreAlignment(I); 6861 InstructionCost Cost = 0; 6862 if (Legal->isMaskRequired(I)) 6863 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6864 CostKind); 6865 else 6866 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6867 CostKind, I); 6868 6869 bool Reverse = ConsecutiveStride < 0; 6870 if (Reverse) 6871 Cost += 6872 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6873 return Cost; 6874 } 6875 6876 InstructionCost 6877 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6878 ElementCount VF) { 6879 assert(Legal->isUniformMemOp(*I)); 6880 6881 Type *ValTy = getMemInstValueType(I); 6882 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6883 const Align Alignment = getLoadStoreAlignment(I); 6884 unsigned AS = getLoadStoreAddressSpace(I); 6885 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6886 if (isa<LoadInst>(I)) { 6887 return TTI.getAddressComputationCost(ValTy) + 6888 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6889 CostKind) + 6890 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6891 } 6892 StoreInst *SI = cast<StoreInst>(I); 6893 6894 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6895 return TTI.getAddressComputationCost(ValTy) + 6896 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6897 CostKind) + 6898 (isLoopInvariantStoreValue 6899 ? 
                 0
                 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                          VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost +=
        Group->getNumMembers() *
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  }
  return Cost;
}

InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit for no in-loop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return InstructionCost::getInvalid();
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for a pattern of, and finding the minimal acceptable cost:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
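  // Schematically, for an in-loop reduction chain such as
  //   %a.ext = sext <4 x i8> %a to <4 x i32>
  //   %b.ext = sext <4 x i8> %b to <4 x i32>
  //   %mul   = mul nsw <4 x i32> %a.ext, %b.ext
  //   %sum   = add i32 %phi, <reduction of %mul>
  // the sext and mul instructions are costed as 0 and the add carries the
  // extended multiply-add reduction cost, provided the target reports that
  // combined cost as cheaper than the sum of the individual costs.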
6982 Instruction *RetI = I; 6983 if ((RetI->getOpcode() == Instruction::SExt || 6984 RetI->getOpcode() == Instruction::ZExt)) { 6985 if (!RetI->hasOneUser()) 6986 return InstructionCost::getInvalid(); 6987 RetI = RetI->user_back(); 6988 } 6989 if (RetI->getOpcode() == Instruction::Mul && 6990 RetI->user_back()->getOpcode() == Instruction::Add) { 6991 if (!RetI->hasOneUser()) 6992 return InstructionCost::getInvalid(); 6993 RetI = RetI->user_back(); 6994 } 6995 6996 // Test if the found instruction is a reduction, and if not return an invalid 6997 // cost specifying the parent to use the original cost modelling. 6998 if (!InLoopReductionImmediateChains.count(RetI)) 6999 return InstructionCost::getInvalid(); 7000 7001 // Find the reduction this chain is a part of and calculate the basic cost of 7002 // the reduction on its own. 7003 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7004 Instruction *ReductionPhi = LastChain; 7005 while (!isa<PHINode>(ReductionPhi)) 7006 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7007 7008 RecurrenceDescriptor RdxDesc = 7009 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7010 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7011 RdxDesc.getOpcode(), VectorTy, false, CostKind); 7012 7013 // Get the operand that was not the reduction chain and match it to one of the 7014 // patterns, returning the better cost if it is found. 7015 Instruction *RedOp = RetI->getOperand(1) == LastChain 7016 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7017 : dyn_cast<Instruction>(RetI->getOperand(1)); 7018 7019 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7020 7021 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 7022 !TheLoop->isLoopInvariant(RedOp)) { 7023 bool IsUnsigned = isa<ZExtInst>(RedOp); 7024 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7025 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7026 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7027 CostKind); 7028 7029 InstructionCost ExtCost = 7030 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7031 TTI::CastContextHint::None, CostKind, RedOp); 7032 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7033 return I == RetI ? *RedCost.getValue() : 0; 7034 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 7035 Instruction *Mul = RedOp; 7036 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 7037 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 7038 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 7039 Op0->getOpcode() == Op1->getOpcode() && 7040 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7041 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7042 bool IsUnsigned = isa<ZExtInst>(Op0); 7043 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7044 // reduce(mul(ext, ext)) 7045 InstructionCost ExtCost = 7046 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7047 TTI::CastContextHint::None, CostKind, Op0); 7048 InstructionCost MulCost = 7049 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 7050 7051 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7052 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7053 CostKind); 7054 7055 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7056 return I == RetI ? 
*RedCost.getValue() : 0; 7057 } else { 7058 InstructionCost MulCost = 7059 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 7060 7061 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7062 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7063 CostKind); 7064 7065 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7066 return I == RetI ? *RedCost.getValue() : 0; 7067 } 7068 } 7069 7070 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 7071 } 7072 7073 InstructionCost 7074 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7075 ElementCount VF) { 7076 // Calculate scalar cost only. Vectorization cost should be ready at this 7077 // moment. 7078 if (VF.isScalar()) { 7079 Type *ValTy = getMemInstValueType(I); 7080 const Align Alignment = getLoadStoreAlignment(I); 7081 unsigned AS = getLoadStoreAddressSpace(I); 7082 7083 return TTI.getAddressComputationCost(ValTy) + 7084 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7085 TTI::TCK_RecipThroughput, I); 7086 } 7087 return getWideningCost(I, VF); 7088 } 7089 7090 LoopVectorizationCostModel::VectorizationCostTy 7091 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7092 ElementCount VF) { 7093 // If we know that this instruction will remain uniform, check the cost of 7094 // the scalar version. 7095 if (isUniformAfterVectorization(I, VF)) 7096 VF = ElementCount::getFixed(1); 7097 7098 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7099 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7100 7101 // Forced scalars do not have any scalarization overhead. 7102 auto ForcedScalar = ForcedScalars.find(VF); 7103 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7104 auto InstSet = ForcedScalar->second; 7105 if (InstSet.count(I)) 7106 return VectorizationCostTy( 7107 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7108 VF.getKnownMinValue()), 7109 false); 7110 } 7111 7112 Type *VectorTy; 7113 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7114 7115 bool TypeNotScalarized = 7116 VF.isVector() && VectorTy->isVectorTy() && 7117 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7118 return VectorizationCostTy(C, TypeNotScalarized); 7119 } 7120 7121 InstructionCost 7122 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7123 ElementCount VF) const { 7124 7125 if (VF.isScalable()) 7126 return InstructionCost::getInvalid(); 7127 7128 if (VF.isScalar()) 7129 return 0; 7130 7131 InstructionCost Cost = 0; 7132 Type *RetTy = ToVectorTy(I->getType(), VF); 7133 if (!RetTy->isVoidTy() && 7134 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7135 Cost += TTI.getScalarizationOverhead( 7136 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7137 true, false); 7138 7139 // Some targets keep addresses scalar. 7140 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7141 return Cost; 7142 7143 // Some targets support efficient element stores. 7144 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7145 return Cost; 7146 7147 // Collect operands to consider. 7148 CallInst *CI = dyn_cast<CallInst>(I); 7149 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7150 7151 // Skip operands that do not require extraction/scalarization and do not incur 7152 // any overhead. 
7153 SmallVector<Type *> Tys; 7154 for (auto *V : filterExtractingOperands(Ops, VF)) 7155 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7156 return Cost + TTI.getOperandsScalarizationOverhead( 7157 filterExtractingOperands(Ops, VF), Tys); 7158 } 7159 7160 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7161 if (VF.isScalar()) 7162 return; 7163 NumPredStores = 0; 7164 for (BasicBlock *BB : TheLoop->blocks()) { 7165 // For each instruction in the old loop. 7166 for (Instruction &I : *BB) { 7167 Value *Ptr = getLoadStorePointerOperand(&I); 7168 if (!Ptr) 7169 continue; 7170 7171 // TODO: We should generate better code and update the cost model for 7172 // predicated uniform stores. Today they are treated as any other 7173 // predicated store (see added test cases in 7174 // invariant-store-vectorization.ll). 7175 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7176 NumPredStores++; 7177 7178 if (Legal->isUniformMemOp(I)) { 7179 // TODO: Avoid replicating loads and stores instead of 7180 // relying on instcombine to remove them. 7181 // Load: Scalar load + broadcast 7182 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7183 InstructionCost Cost = getUniformMemOpCost(&I, VF); 7184 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7185 continue; 7186 } 7187 7188 // We assume that widening is the best solution when possible. 7189 if (memoryInstructionCanBeWidened(&I, VF)) { 7190 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7191 int ConsecutiveStride = 7192 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7193 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7194 "Expected consecutive stride."); 7195 InstWidening Decision = 7196 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7197 setWideningDecision(&I, VF, Decision, Cost); 7198 continue; 7199 } 7200 7201 // Choose between Interleaving, Gather/Scatter or Scalarization. 7202 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7203 unsigned NumAccesses = 1; 7204 if (isAccessInterleaved(&I)) { 7205 auto Group = getInterleavedAccessGroup(&I); 7206 assert(Group && "Fail to get an interleaved access group."); 7207 7208 // Make one decision for the whole group. 7209 if (getWideningDecision(&I, VF) != CM_Unknown) 7210 continue; 7211 7212 NumAccesses = Group->getNumMembers(); 7213 if (interleavedAccessCanBeWidened(&I, VF)) 7214 InterleaveCost = getInterleaveGroupCost(&I, VF); 7215 } 7216 7217 InstructionCost GatherScatterCost = 7218 isLegalGatherOrScatter(&I) 7219 ? getGatherScatterCost(&I, VF) * NumAccesses 7220 : InstructionCost::getInvalid(); 7221 7222 InstructionCost ScalarizationCost = 7223 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7224 7225 // Choose better solution for the current VF, 7226 // write down this decision and use it during vectorization. 7227 InstructionCost Cost; 7228 InstWidening Decision; 7229 if (InterleaveCost <= GatherScatterCost && 7230 InterleaveCost < ScalarizationCost) { 7231 Decision = CM_Interleave; 7232 Cost = InterleaveCost; 7233 } else if (GatherScatterCost < ScalarizationCost) { 7234 Decision = CM_GatherScatter; 7235 Cost = GatherScatterCost; 7236 } else { 7237 assert(!VF.isScalable() && 7238 "We cannot yet scalarise for scalable vectors"); 7239 Decision = CM_Scalarize; 7240 Cost = ScalarizationCost; 7241 } 7242 // If the instructions belongs to an interleave group, the whole group 7243 // receives the same decision. 
The whole group receives the cost, but 7244 // the cost will actually be assigned to one instruction. 7245 if (auto Group = getInterleavedAccessGroup(&I)) 7246 setWideningDecision(Group, VF, Decision, Cost); 7247 else 7248 setWideningDecision(&I, VF, Decision, Cost); 7249 } 7250 } 7251 7252 // Make sure that any load of address and any other address computation 7253 // remains scalar unless there is gather/scatter support. This avoids 7254 // inevitable extracts into address registers, and also has the benefit of 7255 // activating LSR more, since that pass can't optimize vectorized 7256 // addresses. 7257 if (TTI.prefersVectorizedAddressing()) 7258 return; 7259 7260 // Start with all scalar pointer uses. 7261 SmallPtrSet<Instruction *, 8> AddrDefs; 7262 for (BasicBlock *BB : TheLoop->blocks()) 7263 for (Instruction &I : *BB) { 7264 Instruction *PtrDef = 7265 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7266 if (PtrDef && TheLoop->contains(PtrDef) && 7267 getWideningDecision(&I, VF) != CM_GatherScatter) 7268 AddrDefs.insert(PtrDef); 7269 } 7270 7271 // Add all instructions used to generate the addresses. 7272 SmallVector<Instruction *, 4> Worklist; 7273 append_range(Worklist, AddrDefs); 7274 while (!Worklist.empty()) { 7275 Instruction *I = Worklist.pop_back_val(); 7276 for (auto &Op : I->operands()) 7277 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7278 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7279 AddrDefs.insert(InstOp).second) 7280 Worklist.push_back(InstOp); 7281 } 7282 7283 for (auto *I : AddrDefs) { 7284 if (isa<LoadInst>(I)) { 7285 // Setting the desired widening decision should ideally be handled in 7286 // by cost functions, but since this involves the task of finding out 7287 // if the loaded register is involved in an address computation, it is 7288 // instead changed here when we know this is the case. 7289 InstWidening Decision = getWideningDecision(I, VF); 7290 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7291 // Scalarize a widened load of address. 7292 setWideningDecision( 7293 I, VF, CM_Scalarize, 7294 (VF.getKnownMinValue() * 7295 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7296 else if (auto Group = getInterleavedAccessGroup(I)) { 7297 // Scalarize an interleave group of address loads. 7298 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7299 if (Instruction *Member = Group->getMember(I)) 7300 setWideningDecision( 7301 Member, VF, CM_Scalarize, 7302 (VF.getKnownMinValue() * 7303 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7304 } 7305 } 7306 } else 7307 // Make sure I gets scalarized and a cost estimate without 7308 // scalarization overhead. 7309 ForcedScalars[VF].insert(I); 7310 } 7311 } 7312 7313 InstructionCost 7314 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7315 Type *&VectorTy) { 7316 Type *RetTy = I->getType(); 7317 if (canTruncateToMinimalBitwidth(I, VF)) 7318 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7319 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7320 auto SE = PSE.getSE(); 7321 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7322 7323 // TODO: We need to estimate the cost of intrinsic calls. 7324 switch (I->getOpcode()) { 7325 case Instruction::GetElementPtr: 7326 // We mark this instruction as zero-cost because the cost of GEPs in 7327 // vectorized code depends on whether the corresponding memory instruction 7328 // is scalarized or not. 
Therefore, we handle GEPs with the memory 7329 // instruction cost. 7330 return 0; 7331 case Instruction::Br: { 7332 // In cases of scalarized and predicated instructions, there will be VF 7333 // predicated blocks in the vectorized loop. Each branch around these 7334 // blocks requires also an extract of its vector compare i1 element. 7335 bool ScalarPredicatedBB = false; 7336 BranchInst *BI = cast<BranchInst>(I); 7337 if (VF.isVector() && BI->isConditional() && 7338 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7339 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7340 ScalarPredicatedBB = true; 7341 7342 if (ScalarPredicatedBB) { 7343 // Return cost for branches around scalarized and predicated blocks. 7344 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7345 auto *Vec_i1Ty = 7346 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7347 return (TTI.getScalarizationOverhead( 7348 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7349 false, true) + 7350 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7351 VF.getKnownMinValue())); 7352 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7353 // The back-edge branch will remain, as will all scalar branches. 7354 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7355 else 7356 // This branch will be eliminated by if-conversion. 7357 return 0; 7358 // Note: We currently assume zero cost for an unconditional branch inside 7359 // a predicated block since it will become a fall-through, although we 7360 // may decide in the future to call TTI for all branches. 7361 } 7362 case Instruction::PHI: { 7363 auto *Phi = cast<PHINode>(I); 7364 7365 // First-order recurrences are replaced by vector shuffles inside the loop. 7366 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7367 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7368 return TTI.getShuffleCost( 7369 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7370 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7371 7372 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7373 // converted into select instructions. We require N - 1 selects per phi 7374 // node, where N is the number of incoming values. 7375 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7376 return (Phi->getNumIncomingValues() - 1) * 7377 TTI.getCmpSelInstrCost( 7378 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7379 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7380 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7381 7382 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7383 } 7384 case Instruction::UDiv: 7385 case Instruction::SDiv: 7386 case Instruction::URem: 7387 case Instruction::SRem: 7388 // If we have a predicated instruction, it may not be executed for each 7389 // vector lane. Get the scalarization cost and scale this amount by the 7390 // probability of executing the predicated block. If the instruction is not 7391 // predicated, we fall through to the next case. 7392 if (VF.isVector() && isScalarWithPredication(I)) { 7393 InstructionCost Cost = 0; 7394 7395 // These instructions have a non-void type, so account for the phi nodes 7396 // that we will create. This cost is likely to be zero. The phi node 7397 // cost, if any, should be scaled by the block probability because it 7398 // models a copy at the end of each predicated block. 
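      // The resulting estimate is therefore
      //   (VF * PHI cost + VF * div/rem cost + scalarization overhead)
      // divided by the reciprocal of the block probability.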
7399 Cost += VF.getKnownMinValue() * 7400 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7401 7402 // The cost of the non-predicated instruction. 7403 Cost += VF.getKnownMinValue() * 7404 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7405 7406 // The cost of insertelement and extractelement instructions needed for 7407 // scalarization. 7408 Cost += getScalarizationOverhead(I, VF); 7409 7410 // Scale the cost by the probability of executing the predicated blocks. 7411 // This assumes the predicated block for each vector lane is equally 7412 // likely. 7413 return Cost / getReciprocalPredBlockProb(); 7414 } 7415 LLVM_FALLTHROUGH; 7416 case Instruction::Add: 7417 case Instruction::FAdd: 7418 case Instruction::Sub: 7419 case Instruction::FSub: 7420 case Instruction::Mul: 7421 case Instruction::FMul: 7422 case Instruction::FDiv: 7423 case Instruction::FRem: 7424 case Instruction::Shl: 7425 case Instruction::LShr: 7426 case Instruction::AShr: 7427 case Instruction::And: 7428 case Instruction::Or: 7429 case Instruction::Xor: { 7430 // Since we will replace the stride by 1 the multiplication should go away. 7431 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7432 return 0; 7433 7434 // Detect reduction patterns 7435 InstructionCost RedCost; 7436 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7437 .isValid()) 7438 return RedCost; 7439 7440 // Certain instructions can be cheaper to vectorize if they have a constant 7441 // second vector operand. One example of this are shifts on x86. 7442 Value *Op2 = I->getOperand(1); 7443 TargetTransformInfo::OperandValueProperties Op2VP; 7444 TargetTransformInfo::OperandValueKind Op2VK = 7445 TTI.getOperandInfo(Op2, Op2VP); 7446 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7447 Op2VK = TargetTransformInfo::OK_UniformValue; 7448 7449 SmallVector<const Value *, 4> Operands(I->operand_values()); 7450 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7451 return N * TTI.getArithmeticInstrCost( 7452 I->getOpcode(), VectorTy, CostKind, 7453 TargetTransformInfo::OK_AnyValue, 7454 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7455 } 7456 case Instruction::FNeg: { 7457 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7458 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF.getKnownMinValue() : 1; 7459 return N * TTI.getArithmeticInstrCost( 7460 I->getOpcode(), VectorTy, CostKind, 7461 TargetTransformInfo::OK_AnyValue, 7462 TargetTransformInfo::OK_AnyValue, 7463 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 7464 I->getOperand(0), I); 7465 } 7466 case Instruction::Select: { 7467 SelectInst *SI = cast<SelectInst>(I); 7468 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7469 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7470 7471 const Value *Op0, *Op1; 7472 using namespace llvm::PatternMatch; 7473 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7474 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7475 // select x, y, false --> x & y 7476 // select x, true, y --> x | y 7477 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7478 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7479 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7480 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7481 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7482 Op1->getType()->getScalarSizeInBits() == 1); 7483 7484 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7485 return TTI.getArithmeticInstrCost( 7486 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy, 7487 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7488 } 7489 7490 Type *CondTy = SI->getCondition()->getType(); 7491 if (!ScalarCond) 7492 CondTy = VectorType::get(CondTy, VF); 7493 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7494 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7495 } 7496 case Instruction::ICmp: 7497 case Instruction::FCmp: { 7498 Type *ValTy = I->getOperand(0)->getType(); 7499 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7500 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7501 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7502 VectorTy = ToVectorTy(ValTy, VF); 7503 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7504 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7505 } 7506 case Instruction::Store: 7507 case Instruction::Load: { 7508 ElementCount Width = VF; 7509 if (Width.isVector()) { 7510 InstWidening Decision = getWideningDecision(I, Width); 7511 assert(Decision != CM_Unknown && 7512 "CM decision should be taken at this point"); 7513 if (Decision == CM_Scalarize) 7514 Width = ElementCount::getFixed(1); 7515 } 7516 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7517 return getMemoryInstructionCost(I, VF); 7518 } 7519 case Instruction::ZExt: 7520 case Instruction::SExt: 7521 case Instruction::FPToUI: 7522 case Instruction::FPToSI: 7523 case Instruction::FPExt: 7524 case Instruction::PtrToInt: 7525 case Instruction::IntToPtr: 7526 case Instruction::SIToFP: 7527 case Instruction::UIToFP: 7528 case Instruction::Trunc: 7529 case Instruction::FPTrunc: 7530 case Instruction::BitCast: { 7531 // Computes the CastContextHint from a Load/Store instruction. 
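    // For example, a cast tied to a consecutively widened access maps to
    // Normal (or Masked when a mask is required), a reversed access maps to
    // Reversed, and a gather/scatter access maps to GatherScatter.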
7532 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7533 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7534 "Expected a load or a store!"); 7535 7536 if (VF.isScalar() || !TheLoop->contains(I)) 7537 return TTI::CastContextHint::Normal; 7538 7539 switch (getWideningDecision(I, VF)) { 7540 case LoopVectorizationCostModel::CM_GatherScatter: 7541 return TTI::CastContextHint::GatherScatter; 7542 case LoopVectorizationCostModel::CM_Interleave: 7543 return TTI::CastContextHint::Interleave; 7544 case LoopVectorizationCostModel::CM_Scalarize: 7545 case LoopVectorizationCostModel::CM_Widen: 7546 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7547 : TTI::CastContextHint::Normal; 7548 case LoopVectorizationCostModel::CM_Widen_Reverse: 7549 return TTI::CastContextHint::Reversed; 7550 case LoopVectorizationCostModel::CM_Unknown: 7551 llvm_unreachable("Instr did not go through cost modelling?"); 7552 } 7553 7554 llvm_unreachable("Unhandled case!"); 7555 }; 7556 7557 unsigned Opcode = I->getOpcode(); 7558 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7559 // For Trunc, the context is the only user, which must be a StoreInst. 7560 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7561 if (I->hasOneUse()) 7562 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7563 CCH = ComputeCCH(Store); 7564 } 7565 // For Z/Sext, the context is the operand, which must be a LoadInst. 7566 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7567 Opcode == Instruction::FPExt) { 7568 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7569 CCH = ComputeCCH(Load); 7570 } 7571 7572 // We optimize the truncation of induction variables having constant 7573 // integer steps. The cost of these truncations is the same as the scalar 7574 // operation. 7575 if (isOptimizableIVTruncate(I, VF)) { 7576 auto *Trunc = cast<TruncInst>(I); 7577 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7578 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7579 } 7580 7581 // Detect reduction patterns 7582 InstructionCost RedCost; 7583 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7584 .isValid()) 7585 return RedCost; 7586 7587 Type *SrcScalarTy = I->getOperand(0)->getType(); 7588 Type *SrcVecTy = 7589 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7590 if (canTruncateToMinimalBitwidth(I, VF)) { 7591 // This cast is going to be shrunk. This may remove the cast or it might 7592 // turn it into slightly different cast. For example, if MinBW == 16, 7593 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7594 // 7595 // Calculate the modified src and dest types. 
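      // For a trunc we use the smaller of the source and MinBW types as the
      // source and the larger of the destination and MinBW types as the
      // destination; for zext/sext the clamping is the other way around.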
7596 Type *MinVecTy = VectorTy; 7597 if (Opcode == Instruction::Trunc) { 7598 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7599 VectorTy = 7600 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7601 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7602 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7603 VectorTy = 7604 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7605 } 7606 } 7607 7608 unsigned N; 7609 if (isScalarAfterVectorization(I, VF)) { 7610 assert(!VF.isScalable() && "VF is assumed to be non scalable"); 7611 N = VF.getKnownMinValue(); 7612 } else 7613 N = 1; 7614 return N * 7615 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7616 } 7617 case Instruction::Call: { 7618 bool NeedToScalarize; 7619 CallInst *CI = cast<CallInst>(I); 7620 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7621 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7622 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7623 return std::min(CallCost, IntrinsicCost); 7624 } 7625 return CallCost; 7626 } 7627 case Instruction::ExtractValue: 7628 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7629 default: 7630 // The cost of executing VF copies of the scalar instruction. This opcode 7631 // is unknown. Assume that it is the same as 'mul'. 7632 return VF.getKnownMinValue() * TTI.getArithmeticInstrCost( 7633 Instruction::Mul, VectorTy, CostKind) + 7634 getScalarizationOverhead(I, VF); 7635 } // end of switch. 7636 } 7637 7638 char LoopVectorize::ID = 0; 7639 7640 static const char lv_name[] = "Loop Vectorization"; 7641 7642 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7643 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7644 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7645 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7646 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7647 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7648 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7649 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7650 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7651 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7652 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7653 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7654 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7655 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7656 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7657 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7658 7659 namespace llvm { 7660 7661 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7662 7663 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7664 bool VectorizeOnlyWhenForced) { 7665 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7666 } 7667 7668 } // end namespace llvm 7669 7670 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7671 // Check if the pointer operand of a load or store instruction is 7672 // consecutive. 7673 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7674 return Legal->isConsecutivePtr(Ptr); 7675 return false; 7676 } 7677 7678 void LoopVectorizationCostModel::collectValuesToIgnore() { 7679 // Ignore ephemeral values. 7680 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7681 7682 // Ignore type-promoting instructions we identified during reduction 7683 // detection. 
7684 for (auto &Reduction : Legal->getReductionVars()) { 7685 RecurrenceDescriptor &RedDes = Reduction.second; 7686 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7687 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7688 } 7689 // Ignore type-casting instructions we identified during induction 7690 // detection. 7691 for (auto &Induction : Legal->getInductionVars()) { 7692 InductionDescriptor &IndDes = Induction.second; 7693 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7694 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7695 } 7696 } 7697 7698 void LoopVectorizationCostModel::collectInLoopReductions() { 7699 for (auto &Reduction : Legal->getReductionVars()) { 7700 PHINode *Phi = Reduction.first; 7701 RecurrenceDescriptor &RdxDesc = Reduction.second; 7702 7703 // We don't collect reductions that are type promoted (yet). 7704 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7705 continue; 7706 7707 // If the target would prefer this reduction to happen "in-loop", then we 7708 // want to record it as such. 7709 unsigned Opcode = RdxDesc.getOpcode(); 7710 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7711 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7712 TargetTransformInfo::ReductionFlags())) 7713 continue; 7714 7715 // Check that we can correctly put the reductions into the loop, by 7716 // finding the chain of operations that leads from the phi to the loop 7717 // exit value. 7718 SmallVector<Instruction *, 4> ReductionOperations = 7719 RdxDesc.getReductionOpChain(Phi, TheLoop); 7720 bool InLoop = !ReductionOperations.empty(); 7721 if (InLoop) { 7722 InLoopReductionChains[Phi] = ReductionOperations; 7723 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7724 Instruction *LastChain = Phi; 7725 for (auto *I : ReductionOperations) { 7726 InLoopReductionImmediateChains[I] = LastChain; 7727 LastChain = I; 7728 } 7729 } 7730 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7731 << " reduction for phi: " << *Phi << "\n"); 7732 } 7733 } 7734 7735 // TODO: we could return a pair of values that specify the max VF and 7736 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7737 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7738 // doesn't have a cost model that can choose which plan to execute if 7739 // more than one is generated. 7740 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7741 LoopVectorizationCostModel &CM) { 7742 unsigned WidestType; 7743 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7744 return WidestVectorRegBits / WidestType; 7745 } 7746 7747 VectorizationFactor 7748 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7749 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7750 ElementCount VF = UserVF; 7751 // Outer loop handling: They may require CFG and instruction level 7752 // transformations before even evaluating whether vectorization is profitable. 7753 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7754 // the vectorization pipeline. 7755 if (!OrigLoop->isInnermost()) { 7756 // If the user doesn't provide a vectorization factor, determine a 7757 // reasonable one. 
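    // determineVPlanVF simply divides the widest fixed vector register width
    // by the widest scalar type used in the loop, e.g. 512-bit registers with
    // 32-bit elements give VF = 512 / 32 = 16.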
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(determineVPlanVF(
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize(),
          CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of loop will be predicated.
  if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all decisions
      // based on them, which includes widening decisions and uniform and scalar
      // values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF.isNonZero() && "MaxVF is zero.");

  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
  if (!UserVF.isZero() &&
      (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
    ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
    LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
                      << " VF " << VF << ".\n");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(VF);
    CM.collectInLoopReductions();
    buildVPlansWithVPRecipes(VF, VF);
    LLVM_DEBUG(printPlans(dbgs()));
    return {{VF, 0}};
  }

  assert(!MaxVF.isScalable() &&
         "Scalable vectors not yet supported beyond this point");

  for (ElementCount VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();

  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
  LLVM_DEBUG(printPlans(dbgs()));
  if (MaxVF.isScalar())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  auto SelectedVF = CM.selectVectorizationFactor(MaxVF);

  // Check if it is profitable to vectorize with runtime checks.
  unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
  if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
                   OrigLoop->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return VectorizationFactor::Disabled();
    }
  }
  return SelectedVF;
}

void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) {
    return !Plan->hasVF(VF);
  });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  assert(BestVF.hasValue() && "Vectorization Factor is missing");
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");

  VPTransformState State{
      *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.TripCount = ILV.getOrCreateTripCount(nullptr);
  State.CanonicalIV = ILV.Induction;

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  // predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
  SmallVector<BasicBlock*> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // The operands of the icmp are often dead truncs, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);

    // We also record as "Dead" the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime
    // test guarding the vectorized loop, the value of the phi, and the casted
    // value of the phi, are the same. The last instruction in this casting chain
    // will get its scalar/vector/widened def from the scalar/vector/widened def
    // of the respective phi node. Any other casts in the induction def-use chain
    // have no other uses outside the phi update chain, and will be ignored.
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    DeadInstructions.insert(Casts.begin(), Casts.end());
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
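  // The result is simply Val + StartIdx * Step for integer inductions, or the
  // equivalent BinOp with an FP multiply for floating-point inductions.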
8004 Type *Ty = Val->getType(); 8005 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8006 8007 if (Ty->isFloatingPointTy()) { 8008 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 8009 8010 // Floating-point operations inherit FMF via the builder's flags. 8011 Value *MulOp = Builder.CreateFMul(C, Step); 8012 return Builder.CreateBinOp(BinOp, Val, MulOp); 8013 } 8014 Constant *C = ConstantInt::get(Ty, StartIdx); 8015 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 8016 } 8017 8018 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8019 SmallVector<Metadata *, 4> MDs; 8020 // Reserve first location for self reference to the LoopID metadata node. 8021 MDs.push_back(nullptr); 8022 bool IsUnrollMetadata = false; 8023 MDNode *LoopID = L->getLoopID(); 8024 if (LoopID) { 8025 // First find existing loop unrolling disable metadata. 8026 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8027 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8028 if (MD) { 8029 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8030 IsUnrollMetadata = 8031 S && S->getString().startswith("llvm.loop.unroll.disable"); 8032 } 8033 MDs.push_back(LoopID->getOperand(i)); 8034 } 8035 } 8036 8037 if (!IsUnrollMetadata) { 8038 // Add runtime unroll disable metadata. 8039 LLVMContext &Context = L->getHeader()->getContext(); 8040 SmallVector<Metadata *, 1> DisableOperands; 8041 DisableOperands.push_back( 8042 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8043 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8044 MDs.push_back(DisableNode); 8045 MDNode *NewLoopID = MDNode::get(Context, MDs); 8046 // Set operand 0 to refer to the loop id itself. 8047 NewLoopID->replaceOperandWith(0, NewLoopID); 8048 L->setLoopID(NewLoopID); 8049 } 8050 } 8051 8052 //===--------------------------------------------------------------------===// 8053 // EpilogueVectorizerMainLoop 8054 //===--------------------------------------------------------------------===// 8055 8056 /// This function is partially responsible for generating the control flow 8057 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8058 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8059 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8060 Loop *Lp = createVectorLoopSkeleton(""); 8061 8062 // Generate the code to check the minimum iteration count of the vector 8063 // epilogue (see below). 8064 EPI.EpilogueIterationCountCheck = 8065 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8066 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8067 8068 // Generate the code to check any assumptions that we've made for SCEV 8069 // expressions. 8070 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8071 8072 // Generate the code that checks at runtime if arrays overlap. We put the 8073 // checks into a separate block to make the more common case of few elements 8074 // faster. 8075 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8076 8077 // Generate the iteration count check for the main loop, *after* the check 8078 // for the epilogue loop, so that the path-length is shorter for the case 8079 // that goes directly through the vector epilogue. The longer-path length for 8080 // the main loop is compensated for, by the gain from vectorizing the larger 8081 // trip count. Note: the branch will get updated later on when we vectorize 8082 // the epilogue. 
8083 EPI.MainLoopIterationCountCheck = 8084 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8085 8086 // Generate the induction variable. 8087 OldInduction = Legal->getPrimaryInduction(); 8088 Type *IdxTy = Legal->getWidestInductionType(); 8089 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8090 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8091 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8092 EPI.VectorTripCount = CountRoundDown; 8093 Induction = 8094 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8095 getDebugLocFromInstOrOperands(OldInduction)); 8096 8097 // Skip induction resume value creation here because they will be created in 8098 // the second pass. If we created them here, they wouldn't be used anyway, 8099 // because the vplan in the second pass still contains the inductions from the 8100 // original loop. 8101 8102 return completeLoopSkeleton(Lp, OrigLoopID); 8103 } 8104 8105 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8106 LLVM_DEBUG({ 8107 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8108 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8109 << ", Main Loop UF:" << EPI.MainLoopUF 8110 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8111 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8112 }); 8113 } 8114 8115 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8116 DEBUG_WITH_TYPE(VerboseDebug, { 8117 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8118 }); 8119 } 8120 8121 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8122 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8123 assert(L && "Expected valid Loop."); 8124 assert(Bypass && "Expected valid bypass basic block."); 8125 unsigned VFactor = 8126 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 8127 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8128 Value *Count = getOrCreateTripCount(L); 8129 // Reuse existing vector loop preheader for TC checks. 8130 // Note that new preheader block is generated for vector loop. 8131 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8132 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8133 8134 // Generate code to check if the loop's trip count is less than VF * UF of the 8135 // main vector loop. 8136 auto P = 8137 Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8138 8139 Value *CheckMinIters = Builder.CreateICmp( 8140 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 8141 "min.iters.check"); 8142 8143 if (!ForEpilogue) 8144 TCCheckBlock->setName("vector.main.loop.iter.check"); 8145 8146 // Create new preheader for vector loop. 8147 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8148 DT, LI, nullptr, "vector.ph"); 8149 8150 if (ForEpilogue) { 8151 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8152 DT->getNode(Bypass)->getIDom()) && 8153 "TC check is expected to dominate Bypass"); 8154 8155 // Update dominator for Bypass & LoopExit. 8156 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8157 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8158 8159 LoopBypassBlocks.push_back(TCCheckBlock); 8160 8161 // Save the trip count so we don't have to regenerate it in the 8162 // vec.epilog.iter.check. This is safe to do because the trip count 8163 // generated here dominates the vector epilog iter check. 
8164 EPI.TripCount = Count; 8165 } 8166 8167 ReplaceInstWithInst( 8168 TCCheckBlock->getTerminator(), 8169 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8170 8171 return TCCheckBlock; 8172 } 8173 8174 //===--------------------------------------------------------------------===// 8175 // EpilogueVectorizerEpilogueLoop 8176 //===--------------------------------------------------------------------===// 8177 8178 /// This function is partially responsible for generating the control flow 8179 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8180 BasicBlock * 8181 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8182 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8183 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8184 8185 // Now, compare the remaining count and if there aren't enough iterations to 8186 // execute the vectorized epilogue skip to the scalar part. 8187 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8188 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8189 LoopVectorPreHeader = 8190 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8191 LI, nullptr, "vec.epilog.ph"); 8192 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8193 VecEpilogueIterationCountCheck); 8194 8195 // Adjust the control flow taking the state info from the main loop 8196 // vectorization into account. 8197 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8198 "expected this to be saved from the previous pass."); 8199 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8200 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8201 8202 DT->changeImmediateDominator(LoopVectorPreHeader, 8203 EPI.MainLoopIterationCountCheck); 8204 8205 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8206 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8207 8208 if (EPI.SCEVSafetyCheck) 8209 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8210 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8211 if (EPI.MemSafetyCheck) 8212 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8213 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8214 8215 DT->changeImmediateDominator( 8216 VecEpilogueIterationCountCheck, 8217 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8218 8219 DT->changeImmediateDominator(LoopScalarPreHeader, 8220 EPI.EpilogueIterationCountCheck); 8221 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 8222 8223 // Keep track of bypass blocks, as they feed start values to the induction 8224 // phis in the scalar loop preheader. 8225 if (EPI.SCEVSafetyCheck) 8226 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8227 if (EPI.MemSafetyCheck) 8228 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8229 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8230 8231 // Generate a resume induction for the vector epilogue and put it in the 8232 // vector epilogue preheader 8233 Type *IdxTy = Legal->getWidestInductionType(); 8234 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8235 LoopVectorPreHeader->getFirstNonPHI()); 8236 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8237 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8238 EPI.MainLoopIterationCountCheck); 8239 8240 // Generate the induction variable. 
  OldInduction = Legal->getPrimaryInduction();
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *StartIdx = EPResumeVal;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of the
  // vector epilogue loop.
  auto P =
      Cost->requiresScalarEpilogue() ?
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8282 8283 Value *CheckMinIters = Builder.CreateICmp( 8284 P, Count, 8285 ConstantInt::get(Count->getType(), 8286 EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF), 8287 "min.epilog.iters.check"); 8288 8289 ReplaceInstWithInst( 8290 Insert->getTerminator(), 8291 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8292 8293 LoopBypassBlocks.push_back(Insert); 8294 return Insert; 8295 } 8296 8297 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8298 LLVM_DEBUG({ 8299 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8300 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8301 << ", Main Loop UF:" << EPI.MainLoopUF 8302 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8303 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8304 }); 8305 } 8306 8307 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8308 DEBUG_WITH_TYPE(VerboseDebug, { 8309 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8310 }); 8311 } 8312 8313 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8314 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8315 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8316 bool PredicateAtRangeStart = Predicate(Range.Start); 8317 8318 for (ElementCount TmpVF = Range.Start * 2; 8319 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8320 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8321 Range.End = TmpVF; 8322 break; 8323 } 8324 8325 return PredicateAtRangeStart; 8326 } 8327 8328 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8329 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8330 /// of VF's starting at a given VF and extending it as much as possible. Each 8331 /// vectorization decision can potentially shorten this sub-range during 8332 /// buildVPlan(). 8333 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8334 ElementCount MaxVF) { 8335 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8336 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8337 VFRange SubRange = {VF, MaxVFPlusOne}; 8338 VPlans.push_back(buildVPlan(SubRange)); 8339 VF = SubRange.End; 8340 } 8341 } 8342 8343 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8344 VPlanPtr &Plan) { 8345 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8346 8347 // Look for cached value. 8348 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8349 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8350 if (ECEntryIt != EdgeMaskCache.end()) 8351 return ECEntryIt->second; 8352 8353 VPValue *SrcMask = createBlockInMask(Src, Plan); 8354 8355 // The terminator has to be a branch inst! 8356 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8357 assert(BI && "Unexpected terminator found"); 8358 8359 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8360 return EdgeMaskCache[Edge] = SrcMask; 8361 8362 // If source is an exiting block, we know the exit edge is dynamically dead 8363 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8364 // adding uses of an otherwise potentially dead instruction. 
8365 if (OrigLoop->isLoopExiting(Src)) 8366 return EdgeMaskCache[Edge] = SrcMask; 8367 8368 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8369 assert(EdgeMask && "No Edge Mask found for condition"); 8370 8371 if (BI->getSuccessor(0) != Dst) 8372 EdgeMask = Builder.createNot(EdgeMask); 8373 8374 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8375 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8376 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8377 // The select version does not introduce new UB if SrcMask is false and 8378 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8379 VPValue *False = Plan->getOrAddVPValue( 8380 ConstantInt::getFalse(BI->getCondition()->getType())); 8381 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8382 } 8383 8384 return EdgeMaskCache[Edge] = EdgeMask; 8385 } 8386 8387 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8388 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8389 8390 // Look for cached value. 8391 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8392 if (BCEntryIt != BlockMaskCache.end()) 8393 return BCEntryIt->second; 8394 8395 // All-one mask is modelled as no-mask following the convention for masked 8396 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8397 VPValue *BlockMask = nullptr; 8398 8399 if (OrigLoop->getHeader() == BB) { 8400 if (!CM.blockNeedsPredication(BB)) 8401 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8402 8403 // Create the block in mask as the first non-phi instruction in the block. 8404 VPBuilder::InsertPointGuard Guard(Builder); 8405 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8406 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8407 8408 // Introduce the early-exit compare IV <= BTC to form header block mask. 8409 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8410 // Start by constructing the desired canonical IV. 8411 VPValue *IV = nullptr; 8412 if (Legal->getPrimaryInduction()) 8413 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8414 else { 8415 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8416 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8417 IV = IVRecipe->getVPValue(); 8418 } 8419 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8420 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8421 8422 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8423 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8424 // as a second argument, we only pass the IV here and extract the 8425 // tripcount from the transform state where codegen of the VP instructions 8426 // happen. 8427 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8428 } else { 8429 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8430 } 8431 return BlockMaskCache[BB] = BlockMask; 8432 } 8433 8434 // This is the block mask. We OR all incoming edges. 8435 for (auto *Predecessor : predecessors(BB)) { 8436 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8437 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8438 return BlockMaskCache[BB] = EdgeMask; 8439 8440 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8441 BlockMask = EdgeMask; 8442 continue; 8443 } 8444 8445 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8446 } 8447 8448 return BlockMaskCache[BB] = BlockMask; 8449 } 8450 8451 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8452 ArrayRef<VPValue *> Operands, 8453 VFRange &Range, 8454 VPlanPtr &Plan) { 8455 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8456 "Must be called with either a load or store"); 8457 8458 auto willWiden = [&](ElementCount VF) -> bool { 8459 if (VF.isScalar()) 8460 return false; 8461 LoopVectorizationCostModel::InstWidening Decision = 8462 CM.getWideningDecision(I, VF); 8463 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8464 "CM decision should be taken at this point."); 8465 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8466 return true; 8467 if (CM.isScalarAfterVectorization(I, VF) || 8468 CM.isProfitableToScalarize(I, VF)) 8469 return false; 8470 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8471 }; 8472 8473 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8474 return nullptr; 8475 8476 VPValue *Mask = nullptr; 8477 if (Legal->isMaskRequired(I)) 8478 Mask = createBlockInMask(I->getParent(), Plan); 8479 8480 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8481 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask); 8482 8483 StoreInst *Store = cast<StoreInst>(I); 8484 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8485 Mask); 8486 } 8487 8488 VPWidenIntOrFpInductionRecipe * 8489 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8490 ArrayRef<VPValue *> Operands) const { 8491 // Check if this is an integer or fp induction. If so, build the recipe that 8492 // produces its scalar and vector values. 8493 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8494 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8495 II.getKind() == InductionDescriptor::IK_FpInduction) { 8496 assert(II.getStartValue() == 8497 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8498 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8499 return new VPWidenIntOrFpInductionRecipe( 8500 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8501 } 8502 8503 return nullptr; 8504 } 8505 8506 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8507 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8508 VPlan &Plan) const { 8509 // Optimize the special case where the source is a constant integer 8510 // induction variable. Notice that we can only optimize the 'trunc' case 8511 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8512 // (c) other casts depend on pointer size. 8513 8514 // Determine whether \p K is a truncation based on an induction variable that 8515 // can be optimized. 
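// For example, with a 64-bit primary induction %iv, a "trunc i64 %iv to i32"
// whose result only feeds 32-bit arithmetic can be widened directly as an
// i32 induction (with truncated start and step) instead of widening the i64
// induction and truncating every vector element. (Illustrative; the exact
// conditions are checked by CM.isOptimizableIVTruncate below.)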
8516 auto isOptimizableIVTruncate = 8517 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8518 return [=](ElementCount VF) -> bool { 8519 return CM.isOptimizableIVTruncate(K, VF); 8520 }; 8521 }; 8522 8523 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8524 isOptimizableIVTruncate(I), Range)) { 8525 8526 InductionDescriptor II = 8527 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8528 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8529 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8530 Start, nullptr, I); 8531 } 8532 return nullptr; 8533 } 8534 8535 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8536 ArrayRef<VPValue *> Operands, 8537 VPlanPtr &Plan) { 8538 // If all incoming values are equal, the incoming VPValue can be used directly 8539 // instead of creating a new VPBlendRecipe. 8540 VPValue *FirstIncoming = Operands[0]; 8541 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8542 return FirstIncoming == Inc; 8543 })) { 8544 return Operands[0]; 8545 } 8546 8547 // We know that all PHIs in non-header blocks are converted into selects, so 8548 // we don't have to worry about the insertion order and we can just use the 8549 // builder. At this point we generate the predication tree. There may be 8550 // duplications since this is a simple recursive scan, but future 8551 // optimizations will clean it up. 8552 SmallVector<VPValue *, 2> OperandsWithMask; 8553 unsigned NumIncoming = Phi->getNumIncomingValues(); 8554 8555 for (unsigned In = 0; In < NumIncoming; In++) { 8556 VPValue *EdgeMask = 8557 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8558 assert((EdgeMask || NumIncoming == 1) && 8559 "Multiple predecessors with one having a full mask"); 8560 OperandsWithMask.push_back(Operands[In]); 8561 if (EdgeMask) 8562 OperandsWithMask.push_back(EdgeMask); 8563 } 8564 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8565 } 8566 8567 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8568 ArrayRef<VPValue *> Operands, 8569 VFRange &Range) const { 8570 8571 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8572 [this, CI](ElementCount VF) { 8573 return CM.isScalarWithPredication(CI, VF); 8574 }, 8575 Range); 8576 8577 if (IsPredicated) 8578 return nullptr; 8579 8580 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8581 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8582 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8583 ID == Intrinsic::pseudoprobe || 8584 ID == Intrinsic::experimental_noalias_scope_decl)) 8585 return nullptr; 8586 8587 auto willWiden = [&](ElementCount VF) -> bool { 8588 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8589 // The following case may be scalarized depending on the VF. 8590 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8591 // version of the instruction. 8592 // Is it beneficial to perform intrinsic call compared to lib call? 8593 bool NeedToScalarize = false; 8594 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8595 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8596 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8597 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 8598 "Either the intrinsic cost or vector call cost must be valid"); 8599 return UseVectorIntrinsic || !NeedToScalarize; 8600 }; 8601 8602 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8603 return nullptr; 8604 8605 ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands()); 8606 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8607 } 8608 8609 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8610 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8611 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8612 // Instruction should be widened, unless it is scalar after vectorization, 8613 // scalarization is profitable or it is predicated. 8614 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8615 return CM.isScalarAfterVectorization(I, VF) || 8616 CM.isProfitableToScalarize(I, VF) || 8617 CM.isScalarWithPredication(I, VF); 8618 }; 8619 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8620 Range); 8621 } 8622 8623 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8624 ArrayRef<VPValue *> Operands) const { 8625 auto IsVectorizableOpcode = [](unsigned Opcode) { 8626 switch (Opcode) { 8627 case Instruction::Add: 8628 case Instruction::And: 8629 case Instruction::AShr: 8630 case Instruction::BitCast: 8631 case Instruction::FAdd: 8632 case Instruction::FCmp: 8633 case Instruction::FDiv: 8634 case Instruction::FMul: 8635 case Instruction::FNeg: 8636 case Instruction::FPExt: 8637 case Instruction::FPToSI: 8638 case Instruction::FPToUI: 8639 case Instruction::FPTrunc: 8640 case Instruction::FRem: 8641 case Instruction::FSub: 8642 case Instruction::ICmp: 8643 case Instruction::IntToPtr: 8644 case Instruction::LShr: 8645 case Instruction::Mul: 8646 case Instruction::Or: 8647 case Instruction::PtrToInt: 8648 case Instruction::SDiv: 8649 case Instruction::Select: 8650 case Instruction::SExt: 8651 case Instruction::Shl: 8652 case Instruction::SIToFP: 8653 case Instruction::SRem: 8654 case Instruction::Sub: 8655 case Instruction::Trunc: 8656 case Instruction::UDiv: 8657 case Instruction::UIToFP: 8658 case Instruction::URem: 8659 case Instruction::Xor: 8660 case Instruction::ZExt: 8661 return true; 8662 } 8663 return false; 8664 }; 8665 8666 if (!IsVectorizableOpcode(I->getOpcode())) 8667 return nullptr; 8668 8669 // Success: widen this instruction. 8670 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8671 } 8672 8673 VPBasicBlock *VPRecipeBuilder::handleReplication( 8674 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8675 VPlanPtr &Plan) { 8676 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8677 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8678 Range); 8679 8680 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8681 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8682 Range); 8683 8684 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8685 IsUniform, IsPredicated); 8686 setRecipe(I, Recipe); 8687 Plan->addVPValue(I, Recipe); 8688 8689 // Find if I uses a predicated instruction. If so, it will use its scalar 8690 // value. 
Avoid hoisting the insert-element which packs the scalar value into 8691 // a vector value, as that happens iff all users use the vector value. 8692 for (VPValue *Op : Recipe->operands()) { 8693 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8694 if (!PredR) 8695 continue; 8696 auto *RepR = 8697 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8698 assert(RepR->isPredicated() && 8699 "expected Replicate recipe to be predicated"); 8700 RepR->setAlsoPack(false); 8701 } 8702 8703 // Finalize the recipe for Instr, first if it is not predicated. 8704 if (!IsPredicated) { 8705 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8706 VPBB->appendRecipe(Recipe); 8707 return VPBB; 8708 } 8709 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8710 assert(VPBB->getSuccessors().empty() && 8711 "VPBB has successors when handling predicated replication."); 8712 // Record predicated instructions for above packing optimizations. 8713 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8714 VPBlockUtils::insertBlockAfter(Region, VPBB); 8715 auto *RegSucc = new VPBasicBlock(); 8716 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8717 return RegSucc; 8718 } 8719 8720 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8721 VPRecipeBase *PredRecipe, 8722 VPlanPtr &Plan) { 8723 // Instructions marked for predication are replicated and placed under an 8724 // if-then construct to prevent side-effects. 8725 8726 // Generate recipes to compute the block mask for this region. 8727 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8728 8729 // Build the triangular if-then region. 8730 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8731 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8732 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8733 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8734 auto *PHIRecipe = Instr->getType()->isVoidTy() 8735 ? nullptr 8736 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8737 if (PHIRecipe) { 8738 Plan->removeVPValueFor(Instr); 8739 Plan->addVPValue(Instr, PHIRecipe); 8740 } 8741 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8742 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8743 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8744 8745 // Note: first set Entry as region entry and then connect successors starting 8746 // from it in order, to propagate the "parent" of each VPBasicBlock. 8747 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8748 VPBlockUtils::connectBlocks(Pred, Exit); 8749 8750 return Region; 8751 } 8752 8753 VPRecipeOrVPValueTy 8754 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8755 ArrayRef<VPValue *> Operands, 8756 VFRange &Range, VPlanPtr &Plan) { 8757 // First, check for specific widening recipes that deal with calls, memory 8758 // operations, inductions and Phi nodes. 
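// The checks below are tried in order: calls, then loads/stores, then header
// phis (inductions and reductions), then truncates of an induction variable,
// and finally generic widening; if none of them applies, the caller falls
// back to replicating the instruction per lane.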
8759 if (auto *CI = dyn_cast<CallInst>(Instr))
8760 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8761
8762 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8763 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8764
8765 VPRecipeBase *Recipe;
8766 if (auto Phi = dyn_cast<PHINode>(Instr)) {
8767 if (Phi->getParent() != OrigLoop->getHeader())
8768 return tryToBlend(Phi, Operands, Plan);
8769 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8770 return toVPRecipeResult(Recipe);
8771
8772 if (Legal->isReductionVariable(Phi)) {
8773 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8774 assert(RdxDesc.getRecurrenceStartValue() ==
8775 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8776 VPValue *StartV = Operands[0];
8777 return toVPRecipeResult(new VPWidenPHIRecipe(Phi, RdxDesc, *StartV));
8778 }
8779
8780 return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
8781 }
8782
8783 if (isa<TruncInst>(Instr) &&
8784 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8785 Range, *Plan)))
8786 return toVPRecipeResult(Recipe);
8787
8788 if (!shouldWiden(Instr, Range))
8789 return nullptr;
8790
8791 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8792 return toVPRecipeResult(new VPWidenGEPRecipe(
8793 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8794
8795 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8796 bool InvariantCond =
8797 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8798 return toVPRecipeResult(new VPWidenSelectRecipe(
8799 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8800 }
8801
8802 return toVPRecipeResult(tryToWiden(Instr, Operands));
8803 }
8804
8805 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8806 ElementCount MaxVF) {
8807 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8808
8809 // Collect instructions from the original loop that will become trivially dead
8810 // in the vectorized loop. We don't need to vectorize these instructions. For
8811 // example, original induction update instructions can become dead because we
8812 // separately emit induction "steps" when generating code for the new loop.
8813 // Similarly, we create a new latch condition when setting up the structure
8814 // of the new loop, so the old one can become dead.
8815 SmallPtrSet<Instruction *, 4> DeadInstructions;
8816 collectTriviallyDeadInstructions(DeadInstructions);
8817
8818 // Add assume instructions we need to drop to DeadInstructions, to prevent
8819 // them from being added to the VPlan.
8820 // TODO: We only need to drop assumes in blocks that get flattened. If the
8821 // control flow is preserved, we should keep them.
8822 auto &ConditionalAssumes = Legal->getConditionalAssumes();
8823 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8824
8825 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8826 // Dead instructions do not need sinking. Remove them from SinkAfter.
8827 for (Instruction *I : DeadInstructions)
8828 SinkAfter.erase(I);
8829
8830 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8831 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8832 VFRange SubRange = {VF, MaxVFPlusOne};
8833 VPlans.push_back(
8834 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8835 VF = SubRange.End;
8836 }
8837 }
8838
8839 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8840 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8841 const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8842
8843 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8844
8845 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8846
8847 // ---------------------------------------------------------------------------
8848 // Pre-construction: record ingredients whose recipes we'll need to further
8849 // process after constructing the initial VPlan.
8850 // ---------------------------------------------------------------------------
8851
8852 // Mark instructions we'll need to sink later and their targets as
8853 // ingredients whose recipe we'll need to record.
8854 for (auto &Entry : SinkAfter) {
8855 RecipeBuilder.recordRecipeOf(Entry.first);
8856 RecipeBuilder.recordRecipeOf(Entry.second);
8857 }
8858 for (auto &Reduction : CM.getInLoopReductionChains()) {
8859 PHINode *Phi = Reduction.first;
8860 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8861 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8862
8863 RecipeBuilder.recordRecipeOf(Phi);
8864 for (auto &R : ReductionOperations) {
8865 RecipeBuilder.recordRecipeOf(R);
8866 // For min/max reductions, where we have a pair of icmp/select, we also
8867 // need to record the ICmp recipe, so it can be removed later.
8868 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8869 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8870 }
8871 }
8872
8873 // For each interleave group which is relevant for this (possibly trimmed)
8874 // Range, add it to the set of groups to be later applied to the VPlan and add
8875 // placeholders for its members' Recipes which we'll be replacing with a
8876 // single VPInterleaveRecipe.
8877 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8878 auto applyIG = [IG, this](ElementCount VF) -> bool {
8879 return (VF.isVector() && // Query is illegal for VF == 1
8880 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8881 LoopVectorizationCostModel::CM_Interleave);
8882 };
8883 if (!getDecisionAndClampRange(applyIG, Range))
8884 continue;
8885 InterleaveGroups.insert(IG);
8886 for (unsigned i = 0; i < IG->getFactor(); i++)
8887 if (Instruction *Member = IG->getMember(i))
8888 RecipeBuilder.recordRecipeOf(Member);
8889 };
8890
8891 // ---------------------------------------------------------------------------
8892 // Build initial VPlan: Scan the body of the loop in a topological order to
8893 // visit each basic block after having visited its predecessor basic blocks.
8894 // ---------------------------------------------------------------------------
8895
8896 // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8897 auto Plan = std::make_unique<VPlan>();
8898 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8899 Plan->setEntry(VPBB);
8900
8901 // Scan the body of the loop in a topological order to visit each basic block
8902 // after having visited its predecessor basic blocks.
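// (Visiting blocks in reverse post-order ensures that, apart from header
// phis, the VPValues and masks of an instruction's operands and predecessors
// already exist by the time its recipe is created.)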
8903 LoopBlocksDFS DFS(OrigLoop);
8904 DFS.perform(LI);
8905
8906 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8907 // Relevant instructions from basic block BB will be grouped into VPRecipe
8908 // ingredients and fill a new VPBasicBlock.
8909 unsigned VPBBsForBB = 0;
8910 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8911 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8912 VPBB = FirstVPBBForBB;
8913 Builder.setInsertPoint(VPBB);
8914
8915 // Introduce each ingredient into VPlan.
8916 // TODO: Model and preserve debug intrinsics in VPlan.
8917 for (Instruction &I : BB->instructionsWithoutDebug()) {
8918 Instruction *Instr = &I;
8919
8920 // First filter out irrelevant instructions, to ensure no recipes are
8921 // built for them.
8922 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8923 continue;
8924
8925 SmallVector<VPValue *, 4> Operands;
8926 auto *Phi = dyn_cast<PHINode>(Instr);
8927 if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8928 Operands.push_back(Plan->getOrAddVPValue(
8929 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8930 } else {
8931 auto OpRange = Plan->mapToVPValues(Instr->operands());
8932 Operands = {OpRange.begin(), OpRange.end()};
8933 }
8934 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8935 Instr, Operands, Range, Plan)) {
8936 // If Instr can be simplified to an existing VPValue, use it.
8937 if (RecipeOrValue.is<VPValue *>()) {
8938 Plan->addVPValue(Instr, RecipeOrValue.get<VPValue *>());
8939 continue;
8940 }
8941 // Otherwise, add the new recipe.
8942 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8943 for (auto *Def : Recipe->definedValues()) {
8944 auto *UV = Def->getUnderlyingValue();
8945 Plan->addVPValue(UV, Def);
8946 }
8947
8948 RecipeBuilder.setRecipe(Instr, Recipe);
8949 VPBB->appendRecipe(Recipe);
8950 continue;
8951 }
8952
8953 // Otherwise, if all widening options failed, the instruction is to be
8954 // replicated. This may create a successor for VPBB.
8955 VPBasicBlock *NextVPBB =
8956 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8957 if (NextVPBB != VPBB) {
8958 VPBB = NextVPBB;
8959 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8960 : "");
8961 }
8962 }
8963 }
8964
8965 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8966 // may also be empty, such as the last one, VPBB, reflecting original
8967 // basic blocks with no recipes.
8968 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8969 assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8970 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8971 VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8972 delete PreEntry;
8973
8974 // ---------------------------------------------------------------------------
8975 // Transform initial VPlan: Apply previously taken decisions, in order, to
8976 // bring the VPlan to its final state.
8977 // ---------------------------------------------------------------------------
8978
8979 // Apply Sink-After legal constraints.
8980 for (auto &Entry : SinkAfter) {
8981 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8982 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8983 // If the target is in a replication region, make sure to move Sink to the
8984 // block after it, not into the replication region itself.
8985 if (auto *Region = 8986 dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) { 8987 if (Region->isReplicator()) { 8988 assert(Region->getNumSuccessors() == 1 && "Expected SESE region!"); 8989 VPBasicBlock *NextBlock = 8990 cast<VPBasicBlock>(Region->getSuccessors().front()); 8991 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8992 continue; 8993 } 8994 } 8995 Sink->moveAfter(Target); 8996 } 8997 8998 // Interleave memory: for each Interleave Group we marked earlier as relevant 8999 // for this VPlan, replace the Recipes widening its memory instructions with a 9000 // single VPInterleaveRecipe at its insertion point. 9001 for (auto IG : InterleaveGroups) { 9002 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9003 RecipeBuilder.getRecipe(IG->getInsertPos())); 9004 SmallVector<VPValue *, 4> StoredValues; 9005 for (unsigned i = 0; i < IG->getFactor(); ++i) 9006 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 9007 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 9008 9009 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9010 Recipe->getMask()); 9011 VPIG->insertBefore(Recipe); 9012 unsigned J = 0; 9013 for (unsigned i = 0; i < IG->getFactor(); ++i) 9014 if (Instruction *Member = IG->getMember(i)) { 9015 if (!Member->getType()->isVoidTy()) { 9016 VPValue *OriginalV = Plan->getVPValue(Member); 9017 Plan->removeVPValueFor(Member); 9018 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9019 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9020 J++; 9021 } 9022 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9023 } 9024 } 9025 9026 // Adjust the recipes for any inloop reductions. 9027 if (Range.Start.isVector()) 9028 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 9029 9030 // Finally, if tail is folded by masking, introduce selects between the phi 9031 // and the live-out instruction of each reduction, at the end of the latch. 9032 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 9033 Builder.setInsertPoint(VPBB); 9034 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9035 for (auto &Reduction : Legal->getReductionVars()) { 9036 if (CM.isInLoopReduction(Reduction.first)) 9037 continue; 9038 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 9039 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 9040 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 9041 } 9042 } 9043 9044 std::string PlanName; 9045 raw_string_ostream RSO(PlanName); 9046 ElementCount VF = Range.Start; 9047 Plan->addVF(VF); 9048 RSO << "Initial VPlan for VF={" << VF; 9049 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9050 Plan->addVF(VF); 9051 RSO << "," << VF; 9052 } 9053 RSO << "},UF>=1"; 9054 RSO.flush(); 9055 Plan->setName(PlanName); 9056 9057 return Plan; 9058 } 9059 9060 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9061 // Outer loop handling: They may require CFG and instruction level 9062 // transformations before even evaluating whether vectorization is profitable. 9063 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9064 // the vectorization pipeline. 
9065 assert(!OrigLoop->isInnermost());
9066 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9067
9068 // Create new empty VPlan
9069 auto Plan = std::make_unique<VPlan>();
9070
9071 // Build hierarchical CFG
9072 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9073 HCFGBuilder.buildHierarchicalCFG();
9074
9075 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9076 VF *= 2)
9077 Plan->addVF(VF);
9078
9079 if (EnableVPlanPredication) {
9080 VPlanPredicator VPP(*Plan);
9081 VPP.predicate();
9082
9083 // Avoid running transformation to recipes until masked code generation in
9084 // VPlan-native path is in place.
9085 return Plan;
9086 }
9087
9088 SmallPtrSet<Instruction *, 1> DeadInstructions;
9089 VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9090 Legal->getInductionVars(),
9091 DeadInstructions, *PSE.getSE());
9092 return Plan;
9093 }
9094
9095 // Adjust the recipes for any inloop reductions. The chain of instructions
9096 // leading from the loop exit instr to the phi needs to be converted to
9097 // reductions, with one operand being vector and the other being the scalar
9098 // reduction chain.
9099 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9100 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
9101 for (auto &Reduction : CM.getInLoopReductionChains()) {
9102 PHINode *Phi = Reduction.first;
9103 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9104 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9105
9106 // ReductionOperations are ordered top-down from the phi's use to the
9107 // LoopExitValue. We keep track of the previous item (the Chain) to tell
9108 // which of the two operands will remain scalar and which will be reduced.
9109 // For minmax the chain will be the select instructions.
9110 Instruction *Chain = Phi;
9111 for (Instruction *R : ReductionOperations) {
9112 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9113 RecurKind Kind = RdxDesc.getRecurrenceKind();
9114
9115 VPValue *ChainOp = Plan->getVPValue(Chain);
9116 unsigned FirstOpId;
9117 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9118 assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9119 "Expected to replace a VPWidenSelectSC");
9120 FirstOpId = 1;
9121 } else {
9122 assert(isa<VPWidenRecipe>(WidenRecipe) &&
9123 "Expected to replace a VPWidenSC");
9124 FirstOpId = 0;
9125 }
9126 unsigned VecOpId =
9127 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9128 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9129
9130 auto *CondOp = CM.foldTailByMasking()
9131 ?
RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9132 : nullptr; 9133 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 9134 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9135 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe); 9136 Plan->removeVPValueFor(R); 9137 Plan->addVPValue(R, RedRecipe); 9138 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9139 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe); 9140 WidenRecipe->eraseFromParent(); 9141 9142 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9143 VPRecipeBase *CompareRecipe = 9144 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9145 assert(isa<VPWidenRecipe>(CompareRecipe) && 9146 "Expected to replace a VPWidenSC"); 9147 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9148 "Expected no remaining users"); 9149 CompareRecipe->eraseFromParent(); 9150 } 9151 Chain = R; 9152 } 9153 } 9154 } 9155 9156 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9157 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9158 VPSlotTracker &SlotTracker) const { 9159 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9160 IG->getInsertPos()->printAsOperand(O, false); 9161 O << ", "; 9162 getAddr()->printAsOperand(O, SlotTracker); 9163 VPValue *Mask = getMask(); 9164 if (Mask) { 9165 O << ", "; 9166 Mask->printAsOperand(O, SlotTracker); 9167 } 9168 for (unsigned i = 0; i < IG->getFactor(); ++i) 9169 if (Instruction *I = IG->getMember(i)) 9170 O << "\n" << Indent << " " << VPlanIngredient(I) << " " << i; 9171 } 9172 #endif 9173 9174 void VPWidenCallRecipe::execute(VPTransformState &State) { 9175 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9176 *this, State); 9177 } 9178 9179 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9180 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 9181 this, *this, InvariantCond, State); 9182 } 9183 9184 void VPWidenRecipe::execute(VPTransformState &State) { 9185 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 9186 } 9187 9188 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9189 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 9190 *this, State.UF, State.VF, IsPtrLoopInvariant, 9191 IsIndexLoopInvariant, State); 9192 } 9193 9194 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9195 assert(!State.Instance && "Int or FP induction being replicated."); 9196 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9197 getTruncInst(), getVPValue(0), 9198 getCastValue(), State); 9199 } 9200 9201 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9202 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc, 9203 this, State); 9204 } 9205 9206 void VPBlendRecipe::execute(VPTransformState &State) { 9207 State.ILV->setDebugLocFromInst(State.Builder, Phi); 9208 // We know that all PHIs in non-header blocks are converted into 9209 // selects, so we don't have to worry about the insertion order and we 9210 // can just use the builder. 9211 // At this point we generate the predication tree. There may be 9212 // duplications since this is a simple recursive scan, but future 9213 // optimizations will clean it up. 
9214 9215 unsigned NumIncoming = getNumIncomingValues(); 9216 9217 // Generate a sequence of selects of the form: 9218 // SELECT(Mask3, In3, 9219 // SELECT(Mask2, In2, 9220 // SELECT(Mask1, In1, 9221 // In0))) 9222 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9223 // are essentially undef are taken from In0. 9224 InnerLoopVectorizer::VectorParts Entry(State.UF); 9225 for (unsigned In = 0; In < NumIncoming; ++In) { 9226 for (unsigned Part = 0; Part < State.UF; ++Part) { 9227 // We might have single edge PHIs (blocks) - use an identity 9228 // 'select' for the first PHI operand. 9229 Value *In0 = State.get(getIncomingValue(In), Part); 9230 if (In == 0) 9231 Entry[Part] = In0; // Initialize with the first incoming value. 9232 else { 9233 // Select between the current value and the previous incoming edge 9234 // based on the incoming mask. 9235 Value *Cond = State.get(getMask(In), Part); 9236 Entry[Part] = 9237 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9238 } 9239 } 9240 } 9241 for (unsigned Part = 0; Part < State.UF; ++Part) 9242 State.set(this, Entry[Part], Part); 9243 } 9244 9245 void VPInterleaveRecipe::execute(VPTransformState &State) { 9246 assert(!State.Instance && "Interleave group being replicated."); 9247 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9248 getStoredValues(), getMask()); 9249 } 9250 9251 void VPReductionRecipe::execute(VPTransformState &State) { 9252 assert(!State.Instance && "Reduction being replicated."); 9253 Value *PrevInChain = State.get(getChainOp(), 0); 9254 for (unsigned Part = 0; Part < State.UF; ++Part) { 9255 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9256 bool IsOrdered = useOrderedReductions(*RdxDesc); 9257 Value *NewVecOp = State.get(getVecOp(), Part); 9258 if (VPValue *Cond = getCondOp()) { 9259 Value *NewCond = State.get(Cond, Part); 9260 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9261 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 9262 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9263 Constant *IdenVec = 9264 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 9265 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9266 NewVecOp = Select; 9267 } 9268 Value *NewRed; 9269 Value *NextInChain; 9270 if (IsOrdered) { 9271 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9272 PrevInChain); 9273 PrevInChain = NewRed; 9274 } else { 9275 PrevInChain = State.get(getChainOp(), Part); 9276 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9277 } 9278 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9279 NextInChain = 9280 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9281 NewRed, PrevInChain); 9282 } else if (IsOrdered) 9283 NextInChain = NewRed; 9284 else { 9285 NextInChain = State.Builder.CreateBinOp( 9286 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 9287 PrevInChain); 9288 } 9289 State.set(this, NextInChain, Part); 9290 } 9291 } 9292 9293 void VPReplicateRecipe::execute(VPTransformState &State) { 9294 if (State.Instance) { // Generate a single instance. 9295 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9296 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9297 *State.Instance, IsPredicated, State); 9298 // Insert scalar instance packing it into a vector. 9299 if (AlsoPack && State.VF.isVector()) { 9300 // If we're constructing lane 0, initialize to start from poison. 
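// (The remaining lanes are filled in by the later predicated instances via
// packScalarIntoVectorValue, so this only provides a base vector for the
// per-lane insertelement sequence to build on.)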
9301 if (State.Instance->Lane.isFirstLane()) {
9302 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9303 Value *Poison = PoisonValue::get(
9304 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9305 State.set(this, Poison, State.Instance->Part);
9306 }
9307 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9308 }
9309 return;
9310 }
9311
9312 // Generate scalar instances for all VF lanes of all UF parts, unless the
9313 // instruction is uniform, in which case generate only the first lane for each
9314 // of the UF parts.
9315 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9316 assert((!State.VF.isScalable() || IsUniform) &&
9317 "Can't scalarize a scalable vector");
9318 for (unsigned Part = 0; Part < State.UF; ++Part)
9319 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9320 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9321 VPIteration(Part, Lane), IsPredicated,
9322 State);
9323 }
9324
9325 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9326 assert(State.Instance && "Branch on Mask works only on single instance.");
9327
9328 unsigned Part = State.Instance->Part;
9329 unsigned Lane = State.Instance->Lane.getKnownLane();
9330
9331 Value *ConditionBit = nullptr;
9332 VPValue *BlockInMask = getMask();
9333 if (BlockInMask) {
9334 ConditionBit = State.get(BlockInMask, Part);
9335 if (ConditionBit->getType()->isVectorTy())
9336 ConditionBit = State.Builder.CreateExtractElement(
9337 ConditionBit, State.Builder.getInt32(Lane));
9338 } else // Block in mask is all-one.
9339 ConditionBit = State.Builder.getTrue();
9340
9341 // Replace the temporary unreachable terminator with a new conditional branch,
9342 // whose two destinations will be set later when they are created.
9343 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9344 assert(isa<UnreachableInst>(CurrentTerminator) &&
9345 "Expected to replace unreachable terminator with conditional branch.");
9346 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9347 CondBr->setSuccessor(0, nullptr);
9348 ReplaceInstWithInst(CurrentTerminator, CondBr);
9349 }
9350
9351 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9352 assert(State.Instance && "Predicated instruction PHI works per instance.");
9353 Instruction *ScalarPredInst =
9354 cast<Instruction>(State.get(getOperand(0), *State.Instance));
9355 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9356 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9357 assert(PredicatingBB && "Predicated block has no single predecessor.");
9358 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9359 "operand must be VPReplicateRecipe");
9360
9361 // By current pack/unpack logic we need to generate only a single phi node: if
9362 // a vector value for the predicated instruction exists at this point it means
9363 // the instruction has vector users only, and a phi for the vector value is
9364 // needed. In this case the recipe of the predicated instruction is marked to
9365 // also do that packing, thereby "hoisting" the insert-element sequence.
9366 // Otherwise, a phi node for the scalar value is needed.
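// The two cases thus produce roughly (simplified; names are illustrative):
//   ; vector value requested: merge the vector before and after the insert
//   %vphi = phi <VF x ty> [ %vec.before, %predicating.bb ],
//                         [ %vec.with.elt, %predicated.bb ]
//   ; scalar value requested: merge poison with the predicated scalar result
//   %sphi = phi ty [ poison, %predicating.bb ], [ %scalar.res, %predicated.bb ]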
9367 unsigned Part = State.Instance->Part; 9368 if (State.hasVectorValue(getOperand(0), Part)) { 9369 Value *VectorValue = State.get(getOperand(0), Part); 9370 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9371 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9372 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9373 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9374 if (State.hasVectorValue(this, Part)) 9375 State.reset(this, VPhi, Part); 9376 else 9377 State.set(this, VPhi, Part); 9378 // NOTE: Currently we need to update the value of the operand, so the next 9379 // predicated iteration inserts its generated value in the correct vector. 9380 State.reset(getOperand(0), VPhi, Part); 9381 } else { 9382 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9383 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9384 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9385 PredicatingBB); 9386 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9387 if (State.hasScalarValue(this, *State.Instance)) 9388 State.reset(this, Phi, *State.Instance); 9389 else 9390 State.set(this, Phi, *State.Instance); 9391 // NOTE: Currently we need to update the value of the operand, so the next 9392 // predicated iteration inserts its generated value in the correct vector. 9393 State.reset(getOperand(0), Phi, *State.Instance); 9394 } 9395 } 9396 9397 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9398 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9399 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 9400 StoredValue ? nullptr : getVPValue(), 9401 getAddr(), StoredValue, getMask()); 9402 } 9403 9404 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9405 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9406 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9407 // for predication. 9408 static ScalarEpilogueLowering getScalarEpilogueLowering( 9409 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9410 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9411 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9412 LoopVectorizationLegality &LVL) { 9413 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9414 // don't look at hints or options, and don't request a scalar epilogue. 9415 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9416 // LoopAccessInfo (due to code dependency and not being able to reliably get 9417 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9418 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9419 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9420 // back to the old way and vectorize with versioning when forced. See D81345.) 
9421 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9422 PGSOQueryType::IRPass) && 9423 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9424 return CM_ScalarEpilogueNotAllowedOptSize; 9425 9426 // 2) If set, obey the directives 9427 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9428 switch (PreferPredicateOverEpilogue) { 9429 case PreferPredicateTy::ScalarEpilogue: 9430 return CM_ScalarEpilogueAllowed; 9431 case PreferPredicateTy::PredicateElseScalarEpilogue: 9432 return CM_ScalarEpilogueNotNeededUsePredicate; 9433 case PreferPredicateTy::PredicateOrDontVectorize: 9434 return CM_ScalarEpilogueNotAllowedUsePredicate; 9435 }; 9436 } 9437 9438 // 3) If set, obey the hints 9439 switch (Hints.getPredicate()) { 9440 case LoopVectorizeHints::FK_Enabled: 9441 return CM_ScalarEpilogueNotNeededUsePredicate; 9442 case LoopVectorizeHints::FK_Disabled: 9443 return CM_ScalarEpilogueAllowed; 9444 }; 9445 9446 // 4) if the TTI hook indicates this is profitable, request predication. 9447 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9448 LVL.getLAI())) 9449 return CM_ScalarEpilogueNotNeededUsePredicate; 9450 9451 return CM_ScalarEpilogueAllowed; 9452 } 9453 9454 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9455 // If Values have been set for this Def return the one relevant for \p Part. 9456 if (hasVectorValue(Def, Part)) 9457 return Data.PerPartOutput[Def][Part]; 9458 9459 if (!hasScalarValue(Def, {Part, 0})) { 9460 Value *IRV = Def->getLiveInIRValue(); 9461 Value *B = ILV->getBroadcastInstrs(IRV); 9462 set(Def, B, Part); 9463 return B; 9464 } 9465 9466 Value *ScalarValue = get(Def, {Part, 0}); 9467 // If we aren't vectorizing, we can just copy the scalar map values over 9468 // to the vector map. 9469 if (VF.isScalar()) { 9470 set(Def, ScalarValue, Part); 9471 return ScalarValue; 9472 } 9473 9474 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9475 bool IsUniform = RepR && RepR->isUniform(); 9476 9477 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9478 // Check if there is a scalar value for the selected lane. 9479 if (!hasScalarValue(Def, {Part, LastLane})) { 9480 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 9481 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 9482 "unexpected recipe found to be invariant"); 9483 IsUniform = true; 9484 LastLane = 0; 9485 } 9486 9487 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 9488 9489 // Set the insert point after the last scalarized instruction. This 9490 // ensures the insertelement sequence will directly follow the scalar 9491 // definitions. 9492 auto OldIP = Builder.saveIP(); 9493 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 9494 Builder.SetInsertPoint(&*NewIP); 9495 9496 // However, if we are vectorizing, we need to construct the vector values. 9497 // If the value is known to be uniform after vectorization, we can just 9498 // broadcast the scalar value corresponding to lane zero for each unroll 9499 // iteration. Otherwise, we construct the vector values using 9500 // insertelement instructions. Since the resulting vectors are stored in 9501 // State, we will only generate the insertelements once. 9502 Value *VectorValue = nullptr; 9503 if (IsUniform) { 9504 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 9505 set(Def, VectorValue, Part); 9506 } else { 9507 // Initialize packing with insertelements to start from undef. 
9508 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 9509 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 9510 set(Def, Undef, Part); 9511 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 9512 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 9513 VectorValue = get(Def, Part); 9514 } 9515 Builder.restoreIP(OldIP); 9516 return VectorValue; 9517 } 9518 9519 // Process the loop in the VPlan-native vectorization path. This path builds 9520 // VPlan upfront in the vectorization pipeline, which allows to apply 9521 // VPlan-to-VPlan transformations from the very beginning without modifying the 9522 // input LLVM IR. 9523 static bool processLoopInVPlanNativePath( 9524 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 9525 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 9526 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 9527 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 9528 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 9529 LoopVectorizationRequirements &Requirements) { 9530 9531 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 9532 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 9533 return false; 9534 } 9535 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 9536 Function *F = L->getHeader()->getParent(); 9537 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 9538 9539 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9540 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 9541 9542 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 9543 &Hints, IAI); 9544 // Use the planner for outer loop vectorization. 9545 // TODO: CM is not used at this point inside the planner. Turn CM into an 9546 // optional argument if we don't need it in the future. 9547 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 9548 Requirements, ORE); 9549 9550 // Get user vectorization factor. 9551 ElementCount UserVF = Hints.getWidth(); 9552 9553 // Plan how to best vectorize, return the best VF and its cost. 9554 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 9555 9556 // If we are stress testing VPlan builds, do not attempt to generate vector 9557 // code. Masked vector code generation support will follow soon. 9558 // Also, do not attempt to vectorize if no vector code will be produced. 9559 if (VPlanBuildStressTest || EnableVPlanPredication || 9560 VectorizationFactor::Disabled() == VF) 9561 return false; 9562 9563 LVP.setBestPlan(VF.Width, 1); 9564 9565 { 9566 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 9567 F->getParent()->getDataLayout()); 9568 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 9569 &CM, BFI, PSI, Checks); 9570 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 9571 << L->getHeader()->getParent()->getName() << "\"\n"); 9572 LVP.executePlan(LB, DT); 9573 } 9574 9575 // Mark the loop as already vectorized to avoid vectorizing again. 9576 Hints.setAlreadyVectorized(); 9577 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9578 return true; 9579 } 9580 9581 // Emit a remark if there are stores to floats that required a floating point 9582 // extension. If the vectorized loop was generated with floating point there 9583 // will be a performance penalty from the conversion overhead and the change in 9584 // the vector width. 
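// For example, a pattern such as
//   %e = fpext float %a to double
//   %m = fmul double %e, %b
//   %t = fptrunc double %m to float
//   store float %t, float* %p
// is vectorized with double-width operations even though the stored type is
// float, so the fpext triggers the remark below. (Illustrative IR only.)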
9585 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9586 SmallVector<Instruction *, 4> Worklist;
9587 for (BasicBlock *BB : L->getBlocks()) {
9588 for (Instruction &Inst : *BB) {
9589 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9590 if (S->getValueOperand()->getType()->isFloatTy())
9591 Worklist.push_back(S);
9592 }
9593 }
9594 }
9595
9596 // Traverse the floating point stores upwards, searching for floating point
9597 // conversions.
9598 SmallPtrSet<const Instruction *, 4> Visited;
9599 SmallPtrSet<const Instruction *, 4> EmittedRemark;
9600 while (!Worklist.empty()) {
9601 auto *I = Worklist.pop_back_val();
9602 if (!L->contains(I))
9603 continue;
9604 if (!Visited.insert(I).second)
9605 continue;
9606
9607 // Emit a remark if the floating point store required a floating
9608 // point conversion.
9609 // TODO: More work could be done to identify the root cause such as a
9610 // constant or a function return type and point the user to it.
9611 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9612 ORE->emit([&]() {
9613 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9614 I->getDebugLoc(), L->getHeader())
9615 << "floating point conversion changes vector width. "
9616 << "Mixed floating point precision requires an up/down "
9617 << "cast that will negatively impact performance.";
9618 });
9619
9620 for (Use &Op : I->operands())
9621 if (auto *OpI = dyn_cast<Instruction>(Op))
9622 Worklist.push_back(OpI);
9623 }
9624 }
9625
9626 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9627 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9628 !EnableLoopInterleaving),
9629 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9630 !EnableLoopVectorization) {}
9631
9632 bool LoopVectorizePass::processLoop(Loop *L) {
9633 assert((EnableVPlanNativePath || L->isInnermost()) &&
9634 "VPlan-native path is not enabled. Only process inner loops.");
9635
9636 #ifndef NDEBUG
9637 const std::string DebugLocStr = getDebugLocString(L);
9638 #endif /* NDEBUG */
9639
9640 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9641 << L->getHeader()->getParent()->getName() << "\" from "
9642 << DebugLocStr << "\n");
9643
9644 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9645
9646 LLVM_DEBUG(
9647 dbgs() << "LV: Loop hints:"
9648 << " force="
9649 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9650 ? "disabled"
9651 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9652 ? "enabled"
9653 : "?"))
9654 << " width=" << Hints.getWidth()
9655 << " unroll=" << Hints.getInterleave() << "\n");
9656
9657 // Function containing the loop.
9658 Function *F = L->getHeader()->getParent();
9659
9660 // Looking at the diagnostic output is the only way to determine if a loop
9661 // was vectorized (other than looking at the IR or machine code), so it
9662 // is important to generate an optimization remark for each loop. Most of
9663 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9664 // generated as OptimizationRemark and OptimizationRemarkMissed are
9665 // less verbose, reporting vectorized loops and unvectorized loops that may
9666 // benefit from vectorization, respectively.
9667
9668 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9669 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9670 return false;
9671 }
9672
9673 PredicatedScalarEvolution PSE(*SE, *L);
9674
9675 // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
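  // (A well-known instance is 32-bit ARM NEON, whose SIMD single-precision
  // arithmetic flushes denormals to zero, so vectorizing strict FP code there
  // could silently change results.)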
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  if (!Requirements.canVectorizeFPMath(Hints)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
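    // (UserIC <= 1 covers both "no user request" (0) and an explicit request
    // of 1; the nested check below distinguishes the two for the remark text.)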
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to not
    // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
    LVP.setBestPlan(VF.Width, IC);

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not beneficial to vectorize the loop, then
      // interleave it.
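      // (InnerLoopUnroller is an InnerLoopVectorizer fixed to VF = 1, so
      // executing the plan with it only unrolls/interleaves the scalar loop.)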
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);
      LVP.executePlan(Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *profitable* to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar epilogue
        // to be vectorized by executing the plan (potentially with a different
        // factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                          EpilogueVF.Width.getKnownMinValue(),
                                          1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
        LVP.executePlan(MainILV, DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);
        LVP.executePlan(EpilogILV, DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);
        LVP.executePlan(LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that is
        // rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
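    // (setAlreadyVectorized() attaches "llvm.loop.isvectorized" metadata to
    // what is now the scalar remainder loop so later vectorizer runs skip it.)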
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
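
// Usage note: LoopVectorizePass is registered with the new pass manager under
// the name "loop-vectorize", so it can be exercised in isolation via, e.g.,
//   opt -passes=loop-vectorize -S input.ll
// (input.ll is a placeholder), which drives run() above and, through runImpl(),
// processLoop() on each supported loop.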