//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
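// For example (an illustrative sketch, not the exact IR this pass emits):
// with a vectorization factor (VF) of 4, the scalar loop
//
//   for (i = 0; i < n; i += 1)
//     A[i] = B[i] + 42;
//
// becomes a loop whose wide iteration loads B[i..i+3] as a <4 x i32> value,
// adds a splat of 42, stores the result to A[i..i+3], and advances i by 4.
//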
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD.
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
// and predicate the instructions accordingly. If tail-folding fails, there are
// different fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 cl::opt<bool> EnableStrictReductions( 335 "enable-strict-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
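/// For example (an illustration, not tied to one target): x86_fp80 holds 80
/// bits of data but is allocated 96 or 128 bits depending on the ABI, so an
/// array of N x86_fp80 values is not bitcast-compatible with <N x x86_fp80>.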
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead of
  /// \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
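  /// For example (illustrative), with UF = 2 and VF = 4, a scalarized value
  /// is held as 2 x 4 = 8 scalar instances, indexed by (part, lane).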
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
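  /// For example (a sketch): with Step = 2, VF = 4, and UF = 2, part 0
  /// produces the scalar steps ScalarIV + {0, 2, 4, 6} and part 1 continues
  /// with ScalarIV + {8, 10, 12, 14}.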
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()).
  /// In the latter case \p EntryVal is a TruncInst and we must not record
  /// anything for that IV, but it's error-prone to expect callers of this
  /// routine to care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g., epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
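/// For example (an illustrative scenario): for a loop with trip count 1003,
/// the first pass might vectorize with a main VF of 16, covering 992
/// iterations; the second pass then vectorizes the remainder with VF 8,
/// covering 8 more, and the last 3 iterations run in the scalar epilogue.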
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
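/// For example (illustrative): with fixed VF = 4 and Step = 2 this returns
/// the constant 8; with scalable VF = <vscale x 4> it returns 8 * vscale,
/// materialized via the vscale intrinsic.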
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form chosen after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. The decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
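  /// For example (illustrative): an i32 operation whose result is known to
  /// need only 8 bits can be performed on <VF x i8> vectors instead of
  /// <VF x i32>, reducing register pressure.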
  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for a memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group, but
    // assign the cost to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }
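  // Illustrative decisions (target-dependent; actual costs are queried from
  // TTI): a load of a[i] in a loop over i would typically get CM_Widen, a
  // load of a[n - i] CM_Widen_Reverse, and a load of a[b[i]] either
  // CM_GatherScatter or CM_Scalarize, whichever the cost model finds cheaper.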
  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports the masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports the masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports the masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports the masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }
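  // Illustrative example: in a loop computing sum += a[b[i]], the load of
  // a[b[i]] has a data-dependent, non-consecutive address, so it can only be
  // widened as a gather when isLegalMaskedGather returns true for its type;
  // otherwise it has to be scalarized.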
  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      RecurrenceDescriptor RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool
  isScalarWithPredication(Instruction *I,
                          ElementCount VF = ElementCount::getFixed(1)) const;

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I, ElementCount VF) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I, VF);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue() const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed; it may be disallowed due
  /// to optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the loop tail.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chains of instructions representing the in-loop reductions.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }
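  // Illustrative example (assumed loop, not from this file): for
  //
  //   for (i = 0; i < n; ++i)
  //     sum += a[i];
  //
  // the chain mapped to the phi of 'sum' is the sequence of 'add' instructions
  // feeding it, in program order. When the reduction is expanded in-loop,
  // these are the operations folded into the in-loop reduction instead of
  // being combined by a single horizontal reduction after the loop.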
  /// Returns true if the Phi is part of an in-loop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, a power-of-2 larger
  /// than zero. One is returned if vectorization should best be avoided due
  /// to cost.
  ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
                                    ElementCount UserVF);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(ElementCount VF);
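  // Worked example with made-up costs: if expectedCost returns 12 for VF=4
  // and 20 for VF=8, the per-lane costs are 12/4 = 3.0 and 20/8 = 2.5, so
  // isMoreProfitable prefers VF=8 even though its total cost is higher.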
  /// Returns the execution time cost of an instruction for a given vector
  /// width. A vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an in-loop reduction pattern, if I is
  /// part of that pattern.
  InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
                                          Type *VectorTy,
                                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer -
  ///   Load: scalar load + broadcast.
  ///   Store: scalar store + (loop-invariant value stored ? 0 : extract of
  ///          last element).
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
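  // Illustrative lowering behind the uniform-load cost above, for VF=4 and a
  // loop-invariant pointer %p (value names are made up):
  //   %x = load i32, i32* %p
  //   %i = insertelement <4 x i32> poison, i32 %x, i32 0
  //   %broadcast = shufflevector <4 x i32> %i, <4 x i32> poison,
  //                              <4 x i32> zeroinitializer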
  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not divisible by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A map of in-loop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup of the in-loop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);
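  // Worked example with made-up costs: if a predicated udiv costs 8 when kept
  // as a (predicated) vector operation, but the chain of scalarized,
  // predicated instructions replacing it and its single-use operands costs 6,
  // computePredInstDiscount returns the non-negative discount 8 - 6 = 2 and
  // the chain is scalarized.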
  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decisions and costs for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }
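  // Illustrative example: if a value is widened for VF=4 but one of its users
  // is scalarized, each of the 4 scalar users needs an extractelement from
  // the widened value; needsExtract and filterExtractingOperands identify the
  // operands whose extraction cost must be added to the scalarization
  // overhead.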
  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};
} // end namespace llvm

/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimating the cost and un-linked from the existing IR. After deciding to
/// vectorize, the checks are moved back. If deciding not to vectorize, the
/// temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr, either no SCEV checks have been generated or they have been used.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr, either no memory runtime checks have been generated or
  /// they have been used.
  Instruction *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVUnionPredicate &UnionPred) {

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
    if (!UnionPred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &UnionPred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      std::tie(std::ignore, MemRuntimeCheckCond) =
          addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                           RtPtrChecking.getChecks(), MemCheckExp);
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary blocks with the checks, update various places
    // accordingly.
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }

  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
    if (!SCEVCheckCond)
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
      // Memory runtime check generation creates compares that use expanded
      // values. Remove them before running the SCEVExpanderCleaners.
      for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
        if (MemCheckExp.isInsertedInstruction(&I))
          continue;
        SE.forgetValue(&I);
        SE.eraseValueFromMap(&I);
        I.eraseFromParent();
      }
    }
    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

    if (SCEVCheckCond)
      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
  }

  /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
                             BasicBlock *LoopVectorPreHeader,
                             BasicBlock *LoopExitBlock) {
    if (!SCEVCheckCond)
      return nullptr;
    if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
      if (C->isZero())
        return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();

    BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Create new preheader for vector loop.
    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    DT->addNewBlock(SCEVCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);

    ReplaceInstWithInst(
        SCEVCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
    // Mark the check as used, to prevent it from being removed during cleanup.
    SCEVCheckCond = nullptr;
    return SCEVCheckBlock;
  }
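  // Resulting CFG sketch after emitSCEVChecks (illustrative):
  //
  //   Pred
  //    |
  //   vector.scevcheck ---(check failed)---> Bypass
  //    |
  //   LoopVectorPreHeader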
  /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
  /// the branches to branch to the vector preheader or \p Bypass, depending on
  /// the generated condition.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
                                   BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks in runtime if arrays overlap.
    if (!MemRuntimeCheckCond)
      return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                MemCheckBlock);

    DT->addNewBlock(MemCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(MemCheckBlock, *LI);

    ReplaceInstWithInst(
        MemCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    // Mark the check as used, to prevent it from being removed during cleanup.
    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
  }
};

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legal checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}
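// Illustrative source-level annotation accepted by the check above (made-up
// loop):
//
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < n; ++i)   // explicitly annotated outer loop
//     for (int j = 0; j < m; ++j)
//       a[i][j] += b[i][j];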
static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE, PSI).MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}
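// Illustrative IR for the broadcast created above (VF=4, fixed-width; value
// names are made up):
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer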
void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Value *Start,
    Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
    VPTransformState &State) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Type *StepType = Step->getType();
  if (Step->getType()->isFloatingPointTy())
    StepType = IntegerType::get(StepType->getContext(),
                                StepType->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
  if (Step->getType()->isFloatingPointTy())
    RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
  Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  //        handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    State.set(Def, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
                                          State, Part);

    LastInduction = cast<Instruction>(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}
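// Illustrative IR created by createVectorIntOrFpInductionPHI above for an i32
// IV with start 0 and step 1, VF=4, UF=2 (names follow the code above):
//   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %ph ],
//                                 [ %vec.ind.next, %latch ]
//   %step.add     = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
//   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>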
void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
    unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if any exist) have no uses outside the
  // induction update chain itself.
  if (Lane < UINT_MAX)
    State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
  else
    State.set(CastDef, VectorLoopVal, Part);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
                                                TruncInst *Trunc, VPValue *Def,
                                                VPValue *CastDef,
                                                VPTransformState &State) {
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");

  auto II = Legal->getInductionVars().find(IV);
  assert(II != Legal->getInductionVars().end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  auto CreateStepValue = [&](const SCEV *Step) -> Value * {
    assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
           "Induction step should be loop invariant");
    if (PSE.getSE()->isSCEVable(IV->getType())) {
      SCEVExpander Exp(*PSE.getSE(), DL, "induction");
      return Exp.expandCodeFor(Step, Step->getType(),
                               LoopVectorPreHeader->getTerminator());
    }
    return cast<SCEVUnknown>(Step)->getValue();
  };

  // The scalar value to broadcast. This is derived from the canonical
  // induction variable. If a truncation type is given, truncate the canonical
  // induction variable and step. Otherwise, derive these values from the
  // induction descriptor.
  auto CreateScalarIV = [&](Value *&Step) -> Value * {
    Value *ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
    return ScalarIV;
  };

  // Create the vector values from the scalar IV, in the absence of creating a
  // vector IV.
  auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      Value *EntryPart =
          getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
                        ID.getInductionOpcode());
      State.set(Def, EntryPart, Part);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
      recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
                                            State, Part);
    }
  };

  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with creating the step value.
  Value *Step = CreateStepValue(ID.getStep());
  if (VF.isZero() || VF.isScalar()) {
    Value *ScalarIV = CreateScalarIV(Step);
    CreateSplatIV(ScalarIV, Step);
    return;
  }

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = needsScalarInduction(EntryVal);
  if (!NeedsScalarIV) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    return;
  }

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (!shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    Value *ScalarIV = CreateScalarIV(Step);
    // Create scalar steps that can be used by instructions we will later
    // scalarize. Note that the addition of the scalar steps will not increase
    // the number of instructions in the loop in the common case prior to
    // InstCombine. We will be trading one vector extract for each scalar step.
    buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
    return;
  }

  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorised IV. Except when we tail-fold, then the splat IV feeds the
  // predicate used by the masked loads/stores.
  Value *ScalarIV = CreateScalarIV(Step);
  if (!Cost->isScalarEpilogueAllowed())
    CreateSplatIV(ScalarIV, Step);
  buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
}
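// Illustrative result of getStepVector (defined below) for VF=4, Val a splat
// of %x, StartIdx = 4 and an integer Step of 2:
//   %induction = splat(%x) + (<0, 1, 2, 3> + <4, 4, 4, 4>) * <2, 2, 2, 2>
//              = <%x + 8, %x + 10, %x + 12, %x + 14>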
Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  VectorType *InitVecValVTy = ValVTy;
  Type *InitVecValSTy = STy;
  if (STy->isFloatingPointTy()) {
    InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Add on StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(
      VLen, ConstantInt::get(InitVecValSTy, StartIdx));
  InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);

  if (STy->isIntegerTy()) {
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Instruction *EntryVal,
                                           const InductionDescriptor &ID,
                                           VPValue *Def, VPValue *CastDef,
                                           VPTransformState &State) {
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  bool IsUniform =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
  unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!IsUniform && VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, VF);
    UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
    SplatStep = Builder.CreateVectorSplat(VF, Step);
    SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
  }

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *StartIdx0 =
        createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);

    if (!IsUniform && VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(Def, Add, Part);
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements so we do those below. This improves the code quality when
      // trying to extract the first element, for example.
    }

    if (ScalarIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);

    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
      auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
      State.set(Def, Add, VPIteration(Part, Lane));
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part, Lane);
    }
  }
}

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst,
      Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
  State.set(Def, VectorValue, Instance.Part);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  return Builder.CreateVectorReverse(Vec, "reverse");
}

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];              // Member of index 0
//     G = Pic[i+1];            // Member of index 1
//     B = Pic[i+2];            // Member of index 2
//     ...                      // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
do something to R, G, B
//   Pic[i]   = R;          // Member of index 0
//   Pic[i+1] = G;          // Member of index 1
//   Pic[i+2] = B;          // Member of index 2
// }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>   ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec             ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getMemInstValueType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(Builder, AddrPart);

    // Note that the current instruction may be any member of the group; we
    // need to adjust the address so that it points at the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
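    // As a hypothetical example of the cast below: with a fixed VF of 4, an
    // interleave factor of 3 and i32 members, VecTy is <12 x i32>, so each
    // adjusted AddrPart is cast from i32* to <12 x i32>* before the wide
    // load or store is emitted.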
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      }
      else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result to it.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = reverseVector(StridedVec);

        State.set(VPDefs[J], StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The sub vector type for the current instruction.
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
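      // (A hypothetical case: a factor-2 group storing i32 and float members
      // of equal size. If the insert position is the i32 member, the float
      // operand is bitcast to the <VF x i32> sub-vector type below before
      // being concatenated and interleaved.)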
2832 2833 if (StoredVec->getType() != SubVT) 2834 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2835 2836 StoredVecs.push_back(StoredVec); 2837 } 2838 2839 // Concatenate all vectors into a wide vector. 2840 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2841 2842 // Interleave the elements in the wide vector. 2843 Value *IVec = Builder.CreateShuffleVector( 2844 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2845 "interleaved.vec"); 2846 2847 Instruction *NewStoreInstr; 2848 if (BlockInMask) { 2849 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2850 Value *ShuffledMask = Builder.CreateShuffleVector( 2851 BlockInMaskPart, 2852 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2853 "interleaved.mask"); 2854 NewStoreInstr = Builder.CreateMaskedStore( 2855 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2856 } 2857 else 2858 NewStoreInstr = 2859 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2860 2861 Group->addMetadata(NewStoreInstr); 2862 } 2863 } 2864 2865 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2866 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2867 VPValue *StoredValue, VPValue *BlockInMask) { 2868 // Attempt to issue a wide load. 2869 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2870 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2871 2872 assert((LI || SI) && "Invalid Load/Store instruction"); 2873 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2874 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2875 2876 LoopVectorizationCostModel::InstWidening Decision = 2877 Cost->getWideningDecision(Instr, VF); 2878 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2879 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2880 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2881 "CM decision is not to widen the memory instruction"); 2882 2883 Type *ScalarDataTy = getMemInstValueType(Instr); 2884 2885 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2886 const Align Alignment = getLoadStoreAlignment(Instr); 2887 2888 // Determine if the pointer operand of the access is either consecutive or 2889 // reverse consecutive. 2890 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2891 bool ConsecutiveStride = 2892 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2893 bool CreateGatherScatter = 2894 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2895 2896 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2897 // gather/scatter. Otherwise Decision should have been to Scalarize. 2898 assert((ConsecutiveStride || CreateGatherScatter) && 2899 "The instruction should be scalarized"); 2900 (void)ConsecutiveStride; 2901 2902 VectorParts BlockInMaskParts(UF); 2903 bool isMaskRequired = BlockInMask; 2904 if (isMaskRequired) 2905 for (unsigned Part = 0; Part < UF; ++Part) 2906 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2907 2908 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2909 // Calculate the pointer for the specific unroll-part. 2910 GetElementPtrInst *PartPtr = nullptr; 2911 2912 bool InBounds = false; 2913 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2914 InBounds = gep->isInBounds(); 2915 if (Reverse) { 2916 // If the address is consecutive but reversed, then the 2917 // wide store needs to start at the last vector element. 
2918 // RunTimeVF = VScale * VF.getKnownMinValue() 2919 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2920 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2921 // NumElt = -Part * RunTimeVF 2922 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2923 // LastLane = 1 - RunTimeVF 2924 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2925 PartPtr = 2926 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2927 PartPtr->setIsInBounds(InBounds); 2928 PartPtr = cast<GetElementPtrInst>( 2929 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2930 PartPtr->setIsInBounds(InBounds); 2931 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2932 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2933 } else { 2934 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2935 PartPtr = cast<GetElementPtrInst>( 2936 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2937 PartPtr->setIsInBounds(InBounds); 2938 } 2939 2940 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2941 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2942 }; 2943 2944 // Handle Stores: 2945 if (SI) { 2946 setDebugLocFromInst(Builder, SI); 2947 2948 for (unsigned Part = 0; Part < UF; ++Part) { 2949 Instruction *NewSI = nullptr; 2950 Value *StoredVal = State.get(StoredValue, Part); 2951 if (CreateGatherScatter) { 2952 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2953 Value *VectorGep = State.get(Addr, Part); 2954 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2955 MaskPart); 2956 } else { 2957 if (Reverse) { 2958 // If we store to reverse consecutive memory locations, then we need 2959 // to reverse the order of elements in the stored value. 2960 StoredVal = reverseVector(StoredVal); 2961 // We don't want to update the value in the map as it might be used in 2962 // another expression. So don't call resetVectorValue(StoredVal). 2963 } 2964 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2965 if (isMaskRequired) 2966 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2967 BlockInMaskParts[Part]); 2968 else 2969 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2970 } 2971 addMetadata(NewSI, SI); 2972 } 2973 return; 2974 } 2975 2976 // Handle loads. 2977 assert(LI && "Must have a load instruction"); 2978 setDebugLocFromInst(Builder, LI); 2979 for (unsigned Part = 0; Part < UF; ++Part) { 2980 Value *NewLI; 2981 if (CreateGatherScatter) { 2982 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2983 Value *VectorGep = State.get(Addr, Part); 2984 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2985 nullptr, "wide.masked.gather"); 2986 addMetadata(NewLI, LI); 2987 } else { 2988 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2989 if (isMaskRequired) 2990 NewLI = Builder.CreateMaskedLoad( 2991 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), 2992 "wide.masked.load"); 2993 else 2994 NewLI = 2995 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2996 2997 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
      addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = reverseVector(NewLI);
    }

    State.set(Def, NewLI, Part);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
                                               VPUser &User,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
                               Builder.GetInsertPoint());
  // Replace the operands of the cloned instructions with their scalar
  // equivalents in the new loop.
  for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
    auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
    auto InputInstance = Instance;
    if (!Operand || !OrigLoop->contains(Operand) ||
        (Cost->isUniformAfterVectorization(Operand, State.VF)))
      InputInstance.Lane = VPLane::getFirstLane();
    auto *NewOp = State.get(User.getOperand(op), InputInstance);
    Cloned->setOperand(op, NewOp);
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  State.set(Def, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<AssumeInst>(Cloned))
    AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(Builder, OldInst);
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(Builder, OldInst);

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
3085 Latch->getTerminator()->eraseFromParent(); 3086 3087 return Induction; 3088 } 3089 3090 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3091 if (TripCount) 3092 return TripCount; 3093 3094 assert(L && "Create Trip Count for null loop."); 3095 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3096 // Find the loop boundaries. 3097 ScalarEvolution *SE = PSE.getSE(); 3098 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3099 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3100 "Invalid loop count"); 3101 3102 Type *IdxTy = Legal->getWidestInductionType(); 3103 assert(IdxTy && "No type for induction"); 3104 3105 // The exit count might have the type of i64 while the phi is i32. This can 3106 // happen if we have an induction variable that is sign extended before the 3107 // compare. The only way that we get a backedge taken count is that the 3108 // induction variable was signed and as such will not overflow. In such a case 3109 // truncation is legal. 3110 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3111 IdxTy->getPrimitiveSizeInBits()) 3112 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3113 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3114 3115 // Get the total trip count from the count by adding 1. 3116 const SCEV *ExitCount = SE->getAddExpr( 3117 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3118 3119 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3120 3121 // Expand the trip count and place the new instructions in the preheader. 3122 // Notice that the pre-header does not change, only the loop body. 3123 SCEVExpander Exp(*SE, DL, "induction"); 3124 3125 // Count holds the overall loop count (N). 3126 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3127 L->getLoopPreheader()->getTerminator()); 3128 3129 if (TripCount->getType()->isPointerTy()) 3130 TripCount = 3131 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3132 L->getLoopPreheader()->getTerminator()); 3133 3134 return TripCount; 3135 } 3136 3137 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3138 if (VectorTripCount) 3139 return VectorTripCount; 3140 3141 Value *TC = getOrCreateTripCount(L); 3142 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3143 3144 Type *Ty = TC->getType(); 3145 // This is where we can make the step a runtime constant. 3146 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3147 3148 // If the tail is to be folded by masking, round the number of iterations N 3149 // up to a multiple of Step instead of rounding down. This is done by first 3150 // adding Step-1 and then rounding down. Note that it's ok if this addition 3151 // overflows: the vector induction variable will eventually wrap to zero given 3152 // that it starts at zero and its Step is a power of two; the loop will then 3153 // exit, with the last early-exit vector comparison also producing all-true. 3154 if (Cost->foldTailByMasking()) { 3155 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3156 "VF*UF must be a power of 2 when folding tail by masking"); 3157 assert(!VF.isScalable() && 3158 "Tail folding not yet supported for scalable vectors"); 3159 TC = Builder.CreateAdd( 3160 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3161 } 3162 3163 // Now we need to generate the expression for the part of the loop that the 3164 // vectorized body will execute. 
This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step, otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // There are two cases where we need to ensure (at least) the last iteration
  // runs in the scalar remainder loop. Thus, if the step evenly divides
  // the trip count, we set the remainder to be equal to the step. If the step
  // does not evenly divide the trip count, no adjustment is necessary since
  // there will already be scalar iterations. Note that the minimum iterations
  // check ensures that N >= Step. The cases are:
  // 1) If there is a non-reversed interleaved group that may speculatively
  //    access memory out-of-bounds.
  // 2) If any instruction may follow a conditionally taken exit. That is, if
  //    the loop contains multiple exiting blocks, or a single exiting block
  //    which is not the latch.
  if (VF.isVector() && Cost->requiresScalarEpilogue()) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                                   const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have the same size");

  // Do a direct cast if element types are castable.
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type.
  // This may happen when V is a floating point vector but DstVTy is a vector
  // of pointers, or vice-versa. Handle this with a two-step bitcast through an
  // intermediate integer type, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
3227 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3228 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3229 3230 // Generate code to check if the loop's trip count is less than VF * UF, or 3231 // equal to it in case a scalar epilogue is required; this implies that the 3232 // vector trip count is zero. This check also covers the case where adding one 3233 // to the backedge-taken count overflowed leading to an incorrect trip count 3234 // of zero. In this case we will also jump to the scalar loop. 3235 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3236 : ICmpInst::ICMP_ULT; 3237 3238 // If tail is to be folded, vector loop takes care of all iterations. 3239 Value *CheckMinIters = Builder.getFalse(); 3240 if (!Cost->foldTailByMasking()) { 3241 Value *Step = 3242 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3243 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3244 } 3245 // Create new preheader for vector loop. 3246 LoopVectorPreHeader = 3247 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3248 "vector.ph"); 3249 3250 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3251 DT->getNode(Bypass)->getIDom()) && 3252 "TC check is expected to dominate Bypass"); 3253 3254 // Update dominator for Bypass & LoopExit. 3255 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3256 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3257 3258 ReplaceInstWithInst( 3259 TCCheckBlock->getTerminator(), 3260 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3261 LoopBypassBlocks.push_back(TCCheckBlock); 3262 } 3263 3264 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3265 3266 BasicBlock *const SCEVCheckBlock = 3267 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3268 if (!SCEVCheckBlock) 3269 return nullptr; 3270 3271 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3272 (OptForSizeBasedOnProfile && 3273 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3274 "Cannot SCEV check stride or overflow when optimizing for size"); 3275 3276 3277 // Update dominator only if this is first RT check. 3278 if (LoopBypassBlocks.empty()) { 3279 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3280 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3281 } 3282 3283 LoopBypassBlocks.push_back(SCEVCheckBlock); 3284 AddedSafetyChecks = true; 3285 return SCEVCheckBlock; 3286 } 3287 3288 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3289 BasicBlock *Bypass) { 3290 // VPlan-native path does not do any analysis for runtime checks currently. 3291 if (EnableVPlanNativePath) 3292 return nullptr; 3293 3294 BasicBlock *const MemCheckBlock = 3295 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3296 3297 // Check if we generated code that checks in runtime if arrays overlap. We put 3298 // the checks into a separate block to make the more common case of few 3299 // elements faster. 
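  // As a hypothetical illustration: for a loop performing A[i] = B[i] + 1
  // with pointers that may alias, the emitted block compares the accessed
  // ranges [A, A + N) and [B, B + N) and branches to Bypass (the scalar
  // loop) when they overlap.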
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID) const {

  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType() == Step->getType() &&
         "Index type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (=LoopVectorBody), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that the
  // expanded instructions dominate all their uses.
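  // For reference, a sketch of the index transformations emitted below for a
  // hypothetical induction with start value S and step C, ignoring the
  // constant-folding special cases:
  //   IK_IntInduction: S + Index * C
  //   IK_PtrInduction: gep S, Index * C
  //   IK_FpInduction:  S fadd/fsub (C fmul Index)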
  auto GetInsertPoint = [this, &B]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
      return LoopVectorBody->getTerminator();
    return &*B.GetInsertPoint();
  };

  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        StartValue->getType()->getPointerElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
    Value *MulExp = B.CreateFMul(StepValue, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                         "induction");
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  LoopExitBlock = OrigLoop->getUniqueExitBlock();
  assert(LoopExitBlock && "Must have an exit block");
  assert(LoopVectorPreHeader && "Invalid loop structure");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  // Set up the branch from the middle block to the exit and scalar preheader
  // blocks. completeLoopSkeleton will update the condition to use an
  // iteration check, if required to decide whether to execute the remainder.
  BranchInst *BrInst =
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
3450 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3451 3452 // Create and register the new vector loop. 3453 Loop *Lp = LI->AllocateLoop(); 3454 Loop *ParentLoop = OrigLoop->getParentLoop(); 3455 3456 // Insert the new loop into the loop nest and register the new basic blocks 3457 // before calling any utilities such as SCEV that require valid LoopInfo. 3458 if (ParentLoop) { 3459 ParentLoop->addChildLoop(Lp); 3460 } else { 3461 LI->addTopLevelLoop(Lp); 3462 } 3463 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3464 return Lp; 3465 } 3466 3467 void InnerLoopVectorizer::createInductionResumeValues( 3468 Loop *L, Value *VectorTripCount, 3469 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3470 assert(VectorTripCount && L && "Expected valid arguments"); 3471 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3472 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3473 "Inconsistent information about additional bypass."); 3474 // We are going to resume the execution of the scalar loop. 3475 // Go over all of the induction variables that we found and fix the 3476 // PHIs that are left in the scalar version of the loop. 3477 // The starting values of PHI nodes depend on the counter of the last 3478 // iteration in the vectorized loop. 3479 // If we come from a bypass edge then we need to start from the original 3480 // start value. 3481 for (auto &InductionEntry : Legal->getInductionVars()) { 3482 PHINode *OrigPhi = InductionEntry.first; 3483 InductionDescriptor II = InductionEntry.second; 3484 3485 // Create phi nodes to merge from the backedge-taken check block. 3486 PHINode *BCResumeVal = 3487 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3488 LoopScalarPreHeader->getTerminator()); 3489 // Copy original phi DL over to the new one. 3490 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3491 Value *&EndValue = IVEndValues[OrigPhi]; 3492 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3493 if (OrigPhi == OldInduction) { 3494 // We know what the end value is. 3495 EndValue = VectorTripCount; 3496 } else { 3497 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3498 3499 // Fast-math-flags propagate from the original induction instruction. 3500 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3501 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3502 3503 Type *StepType = II.getStep()->getType(); 3504 Instruction::CastOps CastOp = 3505 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3506 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3507 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3508 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3509 EndValue->setName("ind.end"); 3510 3511 // Compute the end value for the additional bypass (if applicable). 3512 if (AdditionalBypass.first) { 3513 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3514 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3515 StepType, true); 3516 CRD = 3517 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3518 EndValueFromAdditionalBypass = 3519 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3520 EndValueFromAdditionalBypass->setName("ind.end"); 3521 } 3522 } 3523 // The new PHI merges the original incoming value, in case of a bypass, 3524 // or the value at the end of the vectorized loop. 
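    // In shorthand IR this looks like (hypothetical block names):
    //   bc.resume.val = phi [ %ind.end, %middle.block ],
    //                       [ %start, %min.iters.check ], ...
    // with one incoming original start value per bypass block.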
3525 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3526 3527 // Fix the scalar body counter (PHI node). 3528 // The old induction's phi node in the scalar body needs the truncated 3529 // value. 3530 for (BasicBlock *BB : LoopBypassBlocks) 3531 BCResumeVal->addIncoming(II.getStartValue(), BB); 3532 3533 if (AdditionalBypass.first) 3534 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3535 EndValueFromAdditionalBypass); 3536 3537 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3538 } 3539 } 3540 3541 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3542 MDNode *OrigLoopID) { 3543 assert(L && "Expected valid loop."); 3544 3545 // The trip counts should be cached by now. 3546 Value *Count = getOrCreateTripCount(L); 3547 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3548 3549 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3550 3551 // Add a check in the middle block to see if we have completed 3552 // all of the iterations in the first vector loop. 3553 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3554 // If tail is to be folded, we know we don't need to run the remainder. 3555 if (!Cost->foldTailByMasking()) { 3556 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3557 Count, VectorTripCount, "cmp.n", 3558 LoopMiddleBlock->getTerminator()); 3559 3560 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3561 // of the corresponding compare because they may have ended up with 3562 // different line numbers and we want to avoid awkward line stepping while 3563 // debugging. Eg. if the compare has got a line number inside the loop. 3564 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3565 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3566 } 3567 3568 // Get ready to start creating new instructions into the vectorized body. 3569 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3570 "Inconsistent vector loop preheader"); 3571 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3572 3573 Optional<MDNode *> VectorizedLoopID = 3574 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3575 LLVMLoopVectorizeFollowupVectorized}); 3576 if (VectorizedLoopID.hasValue()) { 3577 L->setLoopID(VectorizedLoopID.getValue()); 3578 3579 // Do not setAlreadyVectorized if loop attributes have been defined 3580 // explicitly. 3581 return LoopVectorPreHeader; 3582 } 3583 3584 // Keep all loop hints from the original loop on the vector loop (we'll 3585 // replace the vectorizer-specific hints below). 3586 if (MDNode *LID = OrigLoop->getLoopID()) 3587 L->setLoopID(LID); 3588 3589 LoopVectorizeHints Hints(L, true, *ORE); 3590 Hints.setAlreadyVectorized(); 3591 3592 #ifdef EXPENSIVE_CHECKS 3593 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3594 LI->verify(*DT); 3595 #endif 3596 3597 return LoopVectorPreHeader; 3598 } 3599 3600 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3601 /* 3602 In this function we generate a new loop. The new loop will contain 3603 the vectorized instructions while the old loop will continue to run the 3604 scalar remainder. 3605 3606 [ ] <-- loop iteration number check. 3607 / | 3608 / v 3609 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3610 | / | 3611 | / v 3612 || [ ] <-- vector pre header. 3613 |/ | 3614 | v 3615 | [ ] \ 3616 | [ ]_| <-- vector loop. 3617 | | 3618 | v 3619 | -[ ] <--- middle-block. 
3620 | / | 3621 | / v 3622 -|- >[ ] <--- new preheader. 3623 | | 3624 | v 3625 | [ ] \ 3626 | [ ]_| <-- old scalar loop to handle remainder. 3627 \ | 3628 \ v 3629 >[ ] <-- exit block. 3630 ... 3631 */ 3632 3633 // Get the metadata of the original loop before it gets modified. 3634 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3635 3636 // Create an empty vector loop, and prepare basic blocks for the runtime 3637 // checks. 3638 Loop *Lp = createVectorLoopSkeleton(""); 3639 3640 // Now, compare the new count to zero. If it is zero skip the vector loop and 3641 // jump to the scalar loop. This check also covers the case where the 3642 // backedge-taken count is uint##_max: adding one to it will overflow leading 3643 // to an incorrect trip count of zero. In this (rare) case we will also jump 3644 // to the scalar loop. 3645 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3646 3647 // Generate the code to check any assumptions that we've made for SCEV 3648 // expressions. 3649 emitSCEVChecks(Lp, LoopScalarPreHeader); 3650 3651 // Generate the code that checks in runtime if arrays overlap. We put the 3652 // checks into a separate block to make the more common case of few elements 3653 // faster. 3654 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3655 3656 // Some loops have a single integer induction variable, while other loops 3657 // don't. One example is c++ iterators that often have multiple pointer 3658 // induction variables. In the code below we also support a case where we 3659 // don't have a single induction variable. 3660 // 3661 // We try to obtain an induction variable from the original loop as hard 3662 // as possible. However if we don't find one that: 3663 // - is an integer 3664 // - counts from zero, stepping by one 3665 // - is the size of the widest induction variable type 3666 // then we create a new one. 3667 OldInduction = Legal->getPrimaryInduction(); 3668 Type *IdxTy = Legal->getWidestInductionType(); 3669 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3670 // The loop step is equal to the vectorization factor (num of SIMD elements) 3671 // times the unroll factor (num of SIMD instructions). 3672 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3673 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3674 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3675 Induction = 3676 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3677 getDebugLocFromInstOrOperands(OldInduction)); 3678 3679 // Emit phis for the new starting index of the scalar loop. 3680 createInductionResumeValues(Lp, CountRoundDown); 3681 3682 return completeLoopSkeleton(Lp, OrigLoopID); 3683 } 3684 3685 // Fix up external users of the induction variable. At this point, we are 3686 // in LCSSA form, with all external PHIs that use the IV having one input value, 3687 // coming from the remainder loop. We need those PHIs to also have a correct 3688 // value for the IV when arriving directly from the middle block. 3689 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3690 const InductionDescriptor &II, 3691 Value *CountRoundDown, Value *EndValue, 3692 BasicBlock *MiddleBlock) { 3693 // There are two kinds of external IV usages - those that use the value 3694 // computed in the last iteration (the PHI) and those that use the penultimate 3695 // value (the value that feeds into the phi from the loop latch). 3696 // We allow both, but they, obviously, have different values. 
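  // A hypothetical illustration: for
  //   for (i = 0; i < 10; ++i) { ... }
  // an out-of-loop user of the latch value i+1 must see 10, while an
  // out-of-loop user of the phi i itself must see the penultimate value 9,
  // i.e. EndValue - Step.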

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent SCEVs,
  // that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
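/// For example (hypothetical IR), two identical address computations such as
///   %g1 = getelementptr i32, i32* %base, i64 %idx
///   %g2 = getelementptr i32, i32* %base, i64 %idx
/// created during unrolling are folded into one, with the uses of %g2
/// rewritten to %g1.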
3788 static void cse(BasicBlock *BB) { 3789 // Perform simple cse. 3790 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3791 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3792 Instruction *In = &*I++; 3793 3794 if (!CSEDenseMapInfo::canHandle(In)) 3795 continue; 3796 3797 // Check if we can replace this instruction with any of the 3798 // visited instructions. 3799 if (Instruction *V = CSEMap.lookup(In)) { 3800 In->replaceAllUsesWith(V); 3801 In->eraseFromParent(); 3802 continue; 3803 } 3804 3805 CSEMap[In] = In; 3806 } 3807 } 3808 3809 InstructionCost 3810 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3811 bool &NeedToScalarize) const { 3812 Function *F = CI->getCalledFunction(); 3813 Type *ScalarRetTy = CI->getType(); 3814 SmallVector<Type *, 4> Tys, ScalarTys; 3815 for (auto &ArgOp : CI->arg_operands()) 3816 ScalarTys.push_back(ArgOp->getType()); 3817 3818 // Estimate cost of scalarized vector call. The source operands are assumed 3819 // to be vectors, so we need to extract individual elements from there, 3820 // execute VF scalar calls, and then gather the result into the vector return 3821 // value. 3822 InstructionCost ScalarCallCost = 3823 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3824 if (VF.isScalar()) 3825 return ScalarCallCost; 3826 3827 // Compute corresponding vector type for return value and arguments. 3828 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3829 for (Type *ScalarTy : ScalarTys) 3830 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3831 3832 // Compute costs of unpacking argument values for the scalar calls and 3833 // packing the return values to a vector. 3834 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3835 3836 InstructionCost Cost = 3837 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3838 3839 // If we can't emit a vector call for this function, then the currently found 3840 // cost is the cost we need to return. 3841 NeedToScalarize = true; 3842 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3843 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3844 3845 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3846 return Cost; 3847 3848 // If the corresponding vector cost is cheaper, return its cost. 
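  // Hypothetical numbers for illustration only: with VF = 4,
  // ScalarCallCost = 10 and ScalarizationCost = 8, the scalarized cost is
  // 4 * 10 + 8 = 48; if the target provides a vector variant costing 20,
  // that variant is chosen below and NeedToScalarize is cleared.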
3849 InstructionCost VectorCallCost = 3850 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3851 if (VectorCallCost < Cost) { 3852 NeedToScalarize = false; 3853 Cost = VectorCallCost; 3854 } 3855 return Cost; 3856 } 3857 3858 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3859 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3860 return Elt; 3861 return VectorType::get(Elt, VF); 3862 } 3863 3864 InstructionCost 3865 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3866 ElementCount VF) const { 3867 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3868 assert(ID && "Expected intrinsic call!"); 3869 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3870 FastMathFlags FMF; 3871 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3872 FMF = FPMO->getFastMathFlags(); 3873 3874 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); 3875 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3876 SmallVector<Type *> ParamTys; 3877 std::transform(FTy->param_begin(), FTy->param_end(), 3878 std::back_inserter(ParamTys), 3879 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3880 3881 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3882 dyn_cast<IntrinsicInst>(CI)); 3883 return TTI.getIntrinsicInstrCost(CostAttrs, 3884 TargetTransformInfo::TCK_RecipThroughput); 3885 } 3886 3887 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3888 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3889 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3890 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3891 } 3892 3893 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3894 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3895 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3896 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3897 } 3898 3899 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3900 // For every instruction `I` in MinBWs, truncate the operands, create a 3901 // truncated version of `I` and reextend its result. InstCombine runs 3902 // later and will remove any ext/trunc pairs. 3903 SmallPtrSet<Value *, 4> Erased; 3904 for (const auto &KV : Cost->getMinimalBitwidths()) { 3905 // If the value wasn't vectorized, we must maintain the original scalar 3906 // type. The absence of the value from State indicates that it 3907 // wasn't vectorized. 3908 VPValue *Def = State.Plan->getVPValue(KV.first); 3909 if (!State.hasAnyVectorValue(Def)) 3910 continue; 3911 for (unsigned Part = 0; Part < UF; ++Part) { 3912 Value *I = State.get(Def, Part); 3913 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3914 continue; 3915 Type *OriginalTy = I->getType(); 3916 Type *ScalarTruncatedTy = 3917 IntegerType::get(OriginalTy->getContext(), KV.second); 3918 auto *TruncatedTy = FixedVectorType::get( 3919 ScalarTruncatedTy, 3920 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3921 if (TruncatedTy == OriginalTy) 3922 continue; 3923 3924 IRBuilder<> B(cast<Instruction>(I)); 3925 auto ShrinkOperand = [&](Value *V) -> Value * { 3926 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3927 if (ZI->getSrcTy() == TruncatedTy) 3928 return ZI->getOperand(0); 3929 return B.CreateZExtOrTrunc(V, TruncatedTy); 3930 }; 3931 3932 // The actual instruction modification depends on the instruction type, 3933 // unfortunately. 
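      // A hypothetical example of the rewrite, for a minimal bitwidth of 8
      // and a fixed VF of 4:
      //   %a = add <4 x i32> %x, %y
      // becomes
      //   %a.t = add <4 x i8> (trunc %x), (trunc %y)
      //   %a   = zext <4 x i8> %a.t to <4 x i32>
      // leaving InstCombine to clean up the redundant ext/trunc pairs.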
3934 Value *NewI = nullptr; 3935 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3936 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3937 ShrinkOperand(BO->getOperand(1))); 3938 3939 // Any wrapping introduced by shrinking this operation shouldn't be 3940 // considered undefined behavior. So, we can't unconditionally copy 3941 // arithmetic wrapping flags to NewI. 3942 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3943 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3944 NewI = 3945 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3946 ShrinkOperand(CI->getOperand(1))); 3947 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3948 NewI = B.CreateSelect(SI->getCondition(), 3949 ShrinkOperand(SI->getTrueValue()), 3950 ShrinkOperand(SI->getFalseValue())); 3951 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3952 switch (CI->getOpcode()) { 3953 default: 3954 llvm_unreachable("Unhandled cast!"); 3955 case Instruction::Trunc: 3956 NewI = ShrinkOperand(CI->getOperand(0)); 3957 break; 3958 case Instruction::SExt: 3959 NewI = B.CreateSExtOrTrunc( 3960 CI->getOperand(0), 3961 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3962 break; 3963 case Instruction::ZExt: 3964 NewI = B.CreateZExtOrTrunc( 3965 CI->getOperand(0), 3966 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3967 break; 3968 } 3969 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3970 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3971 ->getNumElements(); 3972 auto *O0 = B.CreateZExtOrTrunc( 3973 SI->getOperand(0), 3974 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3975 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3976 ->getNumElements(); 3977 auto *O1 = B.CreateZExtOrTrunc( 3978 SI->getOperand(1), 3979 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3980 3981 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3982 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3983 // Don't do anything with the operands, just extend the result. 3984 continue; 3985 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3986 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3987 ->getNumElements(); 3988 auto *O0 = B.CreateZExtOrTrunc( 3989 IE->getOperand(0), 3990 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3991 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3992 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3993 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3994 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3995 ->getNumElements(); 3996 auto *O0 = B.CreateZExtOrTrunc( 3997 EE->getOperand(0), 3998 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3999 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4000 } else { 4001 // If we don't know what to do, be conservative and don't do anything. 4002 continue; 4003 } 4004 4005 // Lastly, extend the result. 4006 NewI->takeName(cast<Instruction>(I)); 4007 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4008 I->replaceAllUsesWith(Res); 4009 cast<Instruction>(I)->eraseFromParent(); 4010 Erased.insert(I); 4011 State.reset(Def, Res, Part); 4012 } 4013 } 4014 4015 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4016 for (const auto &KV : Cost->getMinimalBitwidths()) { 4017 // If the value wasn't vectorized, we must maintain the original scalar 4018 // type. The absence of the value from State indicates that it 4019 // wasn't vectorized. 
    VPValue *Def = State.Plan->getVPValue(KV.first);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF.isVector())
    truncateToMinimalBitwidths(State);

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs(State);
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs(State);

  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Fix-up external users of the induction variables.
  for (auto &Entry : Legal->getInductionVars())
    fixupIVUsers(Entry.first, Entry.second,
                 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
                 IVEndValues[Entry.first], LoopMiddleBlock);

  fixLCSSAPHIs(State);
  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(LoopVectorBody);

  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(
      LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
      LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes. At this point every instruction in the
  // original loop is widened to a vector form so we can use them to construct
  // the incoming edges.
  VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
    if (!PhiR)
      continue;
    auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
    if (PhiR->getRecurrenceDescriptor()) {
      fixReduction(PhiR, State);
    } else if (Legal->isFirstOrderRecurrence(OrigPhi))
      fixFirstOrderRecurrence(OrigPhi, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
                                                  VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After execution completes the vector loop, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF.isVector()) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    VectorInit = Builder.CreateInsertElement(
        PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
        VectorInit, Builder.getInt32(VF.getKnownMinValue() - 1),
        "vector.recur.init");
  }

  VPValue *PhiDef = State.Plan->getVPValue(Phi);
  VPValue *PreviousDef = State.Plan->getVPValue(Previous);
  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector, or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value of the last part UF - 1. It appears last
  // among all unrolled iterations, due to the order of their construction.
  Value *PreviousLastPart = State.get(PreviousDef, UF - 1);

  // Find and set the insertion point after the previous value if it is an
  // instruction.
  BasicBlock::iterator InsertPt;
  // Note that the previous value may have been constant-folded so it is not
  // guaranteed to be an instruction in the vector loop.
  // FIXME: Loop invariant values do not form recurrences. We should deal with
  //        them earlier.
  if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
    InsertPt = LoopVectorBody->getFirstInsertionPt();
  else {
    Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
    if (isa<PHINode>(PreviousLastPart))
      // If the previous value is a phi node, we should insert after all the
      // phi nodes in the block containing the PHI to avoid breaking basic
      // block verification. Note that the basic block may be different from
      // LoopVectorBody, in case we predicate the loop.
      InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
    else
      InsertPt = ++PreviousInst->getIterator();
  }
  Builder.SetInsertPoint(&*InsertPt);

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  assert(!VF.isScalable());
  SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
  ShuffleMask[0] = VF.getKnownMinValue() - 1;
  for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
    ShuffleMask[I] = I + VF.getKnownMinValue() - 1;

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *PreviousPart = State.get(PreviousDef, Part);
    Value *PhiPart = State.get(PhiDef, Part);
    auto *Shuffle =
        VF.isVector()
            ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
            : Incoming;
    PhiPart->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiPart)->eraseFromParent();
    State.reset(PhiDef, Shuffle, Part);
    Incoming = PreviousPart;
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming,
                      LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *ExtractForScalar = Incoming;
  if (VF.isVector()) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    ExtractForScalar = Builder.CreateExtractElement(
        ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
        "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector())
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
        "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second-to-last element when VF > 1.
  else if (UF > 1)
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, the exiting path through middle will be
  // dynamically dead and the value picked for the phi doesn't matter.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis())
    if (any_of(LCSSAPhi.incoming_values(),
               [Phi](Value *V) { return V == Phi; }))
      LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

static bool useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
  return EnableStrictReductions && RdxDesc.isOrdered();
}

void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = *PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(Builder, ReductionStartValue);
  bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi);

  VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization; clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop-invariant values.
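  // For example, an integer add reduction with start value %s and VF = 4 is
  // seeded with <%s, 0, 0, 0>: lane 0 carries the start value and the other
  // lanes carry the identity element, as set up in widenPHIInstruction().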
  BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();

  bool IsOrdered = State.VF.isVector() && IsInLoopReductionPhi &&
                   useOrderedReductions(RdxDesc);

  for (unsigned Part = 0; Part < UF; ++Part) {
    if (IsOrdered && Part > 0)
      break;
    Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part);
    Value *Val = State.get(PhiR->getBackedgeValue(), Part);
    if (IsOrdered)
      Val = State.get(PhiR->getBackedgeValue(), UF - 1);

    cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch);
  }

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(Builder, LoopExitInst);

  Type *PhiTy = OrigPhi->getType();
  // If the tail is folded by masking, the vector value to leave the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, instead of the former. For an in-loop reduction the
  // reduction will already be predicated, and does not need to be handled
  // here.
  if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at
      // no extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
      if (PreferPredicatedReductionSelect ||
          TTI->preferPredicatedReductionSelect(
              RdxDesc.getOpcode(), PhiTy,
              TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi =
            cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
        VecRdxPhi->setIncomingValueForBlock(
            LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
      }
    }
  }

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
    assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(
        LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = State.get(LoopExitInstDef, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned()
                         ? Builder.CreateSExt(Trunc, VecTy)
                         : Builder.CreateZExt(Trunc, VecTy);
      for (Value::user_iterator UI = RdxParts[Part]->user_begin();
           UI != RdxParts[Part]->user_end();)
        if (*UI != Trunc) {
          (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        } else {
          ++UI;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      State.reset(LoopExitInstDef, RdxParts[Part], Part);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
  unsigned Op = RecurrenceDescriptor::getOpcode(RK);

  // The middle block terminator has already been assigned a DebugLoc here (the
  // OrigLoop's single latch terminator). We want the whole middle block to
  // appear to execute on this line because: (a) it is all compiler generated,
  // (b) these instructions are always executed after evaluating the latch
  // conditional branch, and (c) other passes may add new predecessors which
  // terminate on this line. This is the easiest way to ensure we don't
  // accidentally cause an extra step back into the loop while debugging.
  setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
  if (IsOrdered)
    ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
  else {
    // Floating-point operations should have some FMF to enable the reduction.
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
    for (unsigned Part = 1; Part < UF; ++Part) {
      Value *RdxPart = State.get(LoopExitInstDef, Part);
      if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
        ReducedPartRdx = Builder.CreateBinOp(
            (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
      } else {
        ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
      }
    }
  }

  // Create the reduction after the loop. Note that in-loop reductions create
  // the target reduction in the loop using a Reduction recipe.
  if (VF.isVector() && !IsInLoopReductionPhi) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());
  for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
    BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
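  // For example, an LCSSA phi '%r = phi [%sum, %loop.latch]' in the exit
  // block simply gains the incoming edge '[%rdx, %middle.block]'.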
  for (PHINode &LCSSAPhi : LoopExitBlock->phis())
    if (any_of(LCSSAPhi.incoming_values(),
               [LoopExitInst](Value *V) { return V == LoopExitInst; }))
      LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                                                  VPTransformState &State) {
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RK != RecurKind::Add && RK != RecurKind::Mul)
    return;

  Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
  assert(LoopExitInstr && "null loop exit instruction");
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(LoopExitInstr);
  Visited.insert(LoopExitInstr);

  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();
    if (isa<OverflowingBinaryOperator>(Cur))
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = State.get(State.Plan->getVPValue(Cur), Part);
        cast<Instruction>(V)->dropPoisonGeneratingFlags();
      }

    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);
      if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
          Visited.insert(UI).second)
        Worklist.push_back(UI);
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
      // Some phis were already hand-updated by the reduction and recurrence
      // code above; leave them alone.
      continue;

    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // Non-instruction incoming values will have only one value.

    VPLane Lane = VPLane::getFirstLane();
    if (isa<Instruction>(IncomingValue) &&
        !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
                                           VF))
      Lane = VPLane::getLastLaneForVF(VF);

    // Can be a loop-invariant incoming value or the last scalar value to be
    // extracted from the vectorized loop.
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Value *lastIncomingValue =
        OrigLoop->isLoopInvariant(IncomingValue)
            ? IncomingValue
            : State.get(State.Plan->getVPValue(IncomingValue),
                        VPIteration(UF - 1, Lane));
    LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know whether we can sink it or not.
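  // For example, an instruction whose uses have not all been sunk into the
  // predicated block yet cannot be moved, and is revisited once more of its
  // users have been sunk.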
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and
      // add its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we
      // will need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    VPWidenPHIRecipe *VPPhi =
        cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
    PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
    // Make sure the builder has a valid insert point.
    Builder.SetInsertPoint(NewPhi);
    for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
      VPValue *Inc = VPPhi->getIncomingValue(i);
      VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
      NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
    }
  }
}

void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
                                   VPUser &Operands, unsigned UF,
                                   ElementCount VF, bool IsPtrLoopInvariant,
                                   SmallBitVector &IsIndexLoopInvariant,
                                   VPTransformState &State) {
  // Construct a vector GEP by widening the operands of the scalar GEP as
  // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
  // results in a vector of pointers when at least one operand of the GEP
  // is vector-typed. Thus, to keep the representation compact, we only use
  // vector-typed operands for loop-varying values.
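  // For example, for 'getelementptr %base, %iv' with a loop-invariant %base
  // and a widened induction %iv, %base stays scalar and only the index
  // becomes a vector, which still yields one pointer per lane.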

  if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
    // If we are vectorizing, but the GEP has only loop-invariant operands,
    // the GEP we build (by only using vector-typed operands for
    // loop-varying values) would be a scalar pointer. Thus, to ensure we
    // produce a vector of pointers, we need to either arbitrarily pick an
    // operand to broadcast, or broadcast a clone of the original GEP.
    // Here, we broadcast a clone of the original.
    //
    // TODO: If at some point we decide to scalarize instructions having
    //       loop-invariant operands, this special case will no longer be
    //       required. We would add the scalarization decision to
    //       collectLoopScalars() and teach getVectorValue() to broadcast
    //       the lane-zero scalar value.
    auto *Clone = Builder.Insert(GEP->clone());
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
      State.set(VPDef, EntryPart, Part);
      addMetadata(EntryPart, GEP);
    }
  } else {
    // If the GEP has at least one loop-varying operand, we are sure to
    // produce a vector of pointers. But if we are only unrolling, we want
    // to produce a scalar GEP for each unroll part. Thus, the GEP we
    // produce with the code below will be scalar (if VF == 1) or vector
    // (otherwise). Note that for the unroll-only case, we still maintain
    // values in the vector mapping with initVector, as we do for other
    // instructions.
    for (unsigned Part = 0; Part < UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? State.get(Operands.getOperand(0), VPIteration(0, 0))
                      : State.get(Operands.getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
        VPValue *Operand = Operands.getOperand(I);
        if (IsIndexLoopInvariant[I - 1])
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector, otherwise.
      auto *NewGEP =
          GEP->isInBounds()
              ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
                                          Indices)
              : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
      assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(VPDef, NewGEP, Part);
      addMetadata(NewGEP, GEP);
    }
  }
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
                                              RecurrenceDescriptor *RdxDesc,
                                              VPWidenPHIRecipe *PhiR,
                                              VPTransformState &State) {
  PHINode *P = cast<PHINode>(PN);
  if (EnableVPlanNativePath) {
    // Currently we enter here in the VPlan-native path for non-induction
    // PHIs where all control flow is uniform. We simply widen these PHIs.
    // Create a vector phi with no operands - the vector phi operands will be
    // set at the end of vector code generation.
    Type *VecTy = (State.VF.isScalar())
                      ? PN->getType()
                      : VectorType::get(PN->getType(), State.VF);
    Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
    State.set(PhiR, VecPhi, 0);
    OrigPHIsToFix.push_back(P);

    return;
  }

  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  VPValue *StartVPV = PhiR->getStartValue();
  Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
    Value *Iden = nullptr;
    bool ScalarPHI =
        (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
    Type *VecTy =
        ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);

    if (RdxDesc) {
      assert(Legal->isReductionVariable(P) && StartV &&
             "RdxDesc should only be set for reduction variables; in that case "
             "a StartV is also required");
      RecurKind RK = RdxDesc->getRecurrenceKind();
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
        if (ScalarPHI) {
          Iden = StartV;
        } else {
          IRBuilderBase::InsertPointGuard IPBuilder(Builder);
          Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
          StartV = Iden =
              Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
        }
      } else {
        Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
            RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags());
        Iden = IdenC;

        if (!ScalarPHI) {
          Iden = ConstantVector::getSplat(State.VF, IdenC);
          IRBuilderBase::InsertPointGuard IPBuilder(Builder);
          Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
          Constant *Zero = Builder.getInt32(0);
          StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
        }
      }
    }

    bool IsOrdered = State.VF.isVector() &&
                     Cost->isInLoopReduction(cast<PHINode>(PN)) &&
                     useOrderedReductions(*RdxDesc);

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      if (Part > 0 && IsOrdered)
        return;
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      State.set(PhiR, EntryPart, Part);
      if (StartV) {
        // Make sure to add the reduction start value only to the
        // first unroll part.
        Value *StartVal = (Part == 0) ? StartV : Iden;
        cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
      }
    }
    return;
  }

  assert(!Legal->isReductionVariable(P) &&
         "reductions should be handled above");

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars().count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars().lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  // flags, which can be found from the original scalar operations.
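  // Only pointer inductions reach the switch below; integer and FP
  // inductions are handled by their own recipes.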
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");

    if (Cost->isScalarAfterVectorization(P, State.VF)) {
      // This is the normalized GEP that starts counting at zero.
      Value *PtrInd =
          Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
      // Determine the number of scalars we need to generate for each unroll
      // iteration. If the instruction is uniform, we only need to generate the
      // first lane. Otherwise, we generate all VF values.
      bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
      assert((IsUniform || !VF.isScalable()) &&
             "Currently unsupported for scalable vectors");
      unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();

      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *PartStart = createStepForVF(
            Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
        for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
          Value *Idx = Builder.CreateAdd(
              PartStart, ConstantInt::get(PtrInd->getType(), Lane));
          Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
          Value *SclrGep =
              emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
          SclrGep->setName("next.gep");
          State.set(PhiR, SclrGep, VPIteration(Part, Lane));
        }
      }
      return;
    }
    assert(isa<SCEVConstant>(II.getStep()) &&
           "Induction step not a SCEV constant!");
    Type *PhiType = II.getStep()->getType();

    // Build a pointer phi.
    Value *ScalarStartValue = II.getStartValue();
    Type *ScStValueType = ScalarStartValue->getType();
    PHINode *NewPointerPhi =
        PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
    NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);

    // A pointer induction, performed by using a GEP.
    BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
    Instruction *InductionLoc = LoopLatch->getTerminator();
    const SCEV *ScalarStep = II.getStep();
    SCEVExpander Exp(*PSE.getSE(), DL, "induction");
    Value *ScalarStepValue =
        Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
    Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
    Value *NumUnrolledElems =
        Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
    Value *InductionGEP = GetElementPtrInst::Create(
        ScStValueType->getPointerElementType(), NewPointerPhi,
        Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
        InductionLoc);
    NewPointerPhi->addIncoming(InductionGEP, LoopLatch);

    // Create UF many actual address geps that use the pointer
    // phi as base and a vectorized version of the step value
    // (<step*0, ..., step*N>) as offset.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Type *VecPhiType = VectorType::get(PhiType, State.VF);
      Value *StartOffsetScalar =
          Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
      Value *StartOffset =
          Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
      // Create a vector of consecutive numbers from zero to VF.
      StartOffset = Builder.CreateAdd(StartOffset,
                                      Builder.CreateStepVector(VecPhiType));

      Value *GEP = Builder.CreateGEP(
          ScStValueType->getPointerElementType(), NewPointerPhi,
          Builder.CreateMul(
              StartOffset,
              Builder.CreateVectorSplat(State.VF, ScalarStepValue),
              "vector.gep"));
      State.set(PhiR, GEP, Part);
    }
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
///       Non-zero divisors that are not compile-time constants will not be
///       converted into multiplication, so we will still end up scalarizing
///       the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
                                           VPUser &User,
                                           VPTransformState &State) {
  switch (I.getOpcode()) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unops and binops.
    setDebugLocFromInst(Builder, &I);

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : User.operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V))
        VecOp->copyIRFlags(&I);

      // Use this vector value for all users of the original instruction.
      State.set(Def, V, Part);
      addMetadata(V, &I);
    }

    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *B = State.get(User.getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
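        // The guard restores the builder's previous fast-math flags once the
        // compare below has been created with the original instruction's FMF.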
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      State.set(Def, C, Part);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    // Vectorize casts.
    Type *DestTy =
        (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      State.set(Def, Cast, Part);
      addMetadata(Cast, &I);
    }
    break;
  }
  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
                                               VPUser &ArgOperands,
                                               VPTransformState &State) {
  assert(!isa<DbgInfoIntrinsic>(I) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  setDebugLocFromInst(Builder, &I);

  Module *M = I.getParent()->getParent()->getParent();
  auto *CI = cast<CallInst>(&I);

  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

  // This flag shows whether we should use an intrinsic or a plain call for
  // the vectorized version of the instruction, i.e. whether an intrinsic
  // call is more beneficial than a library call.
  bool NeedToScalarize = false;
  InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
  bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
  assert((UseVectorIntrinsic || !NeedToScalarize) &&
         "Instruction should be scalarized elsewhere.");
  assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
         "Either the intrinsic cost or vector call cost must be valid");

  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Value *, 4> Args;
    for (auto &I : enumerate(ArgOperands.operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
        Arg = State.get(I.value(), Part);
      else
        Arg = State.get(I.value(), VPIteration(0, 0));
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseVectorIntrinsic) {
      // Use the vector version of the intrinsic.
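      // The declaration is looked up with the widened return type, so an
      // overloaded intrinsic (llvm.fabs, for example) resolves to its
      // vector variant.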
      Type *TysForDecl[] = {CI->getType()};
      if (VF.isVector())
        TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
      VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
      // Use the vector version of the function call.
      const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
#ifndef NDEBUG
      assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
             "Can't create vector function.");
#endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
}

void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
                                                 VPUser &Operands,
                                                 bool InvariantCond,
                                                 VPTransformState &State) {
  setDebugLocFromInst(Builder, &I);

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // Instcombine will make this a no-op.
  auto *InvarCond = InvariantCond
                        ? State.get(Operands.getOperand(0), VPIteration(0, 0))
                        : nullptr;

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *Cond =
        InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
    Value *Op0 = State.get(Operands.getOperand(1), Part);
    Value *Op1 = State.get(Operands.getOperand(2), Part);
    Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
    State.set(VPDef, Sel, Part);
    addMetadata(Sel, &I);
  }
}

void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
  auto *Latch = TheLoop->getLoopLatch();

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
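  // For example, in 'store i32 %v, i32* %p', the use of %p is scalar for a
  // consecutive (widened) access, while the use of %v is scalar only if the
  // store itself is scalarized.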
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value or pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
    if (!isa<PHINode>(Ptr) ||
        !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
      return false;
    auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
    if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
      return false;
    return isScalarUse(MemAccess, Ptr);
  };

  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is actually the pointer induction of a loop, it is inserted
  // into Worklist. If the use will be a scalar use, and the pointer is
  // only used by memory accesses, we place the pointer in ScalarPtrs.
  // Otherwise, the pointer is placed in PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    if (isScalarPtrInduction(MemAccess, Ptr)) {
      Worklist.insert(cast<Instruction>(Ptr));
      Instruction *Update = cast<Instruction>(
          cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
      Worklist.insert(Update);
      LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
                        << "\n");
      LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
                        << "\n");
      return;
    }
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If tail-folding is applied, the primary induction variable will be used
    // to feed a vector compare.
    if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
      continue;

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
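    // (A typical example is a counter whose only users are scalarized
    // address computations and its own increment.)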
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(
    Instruction *I, ElementCount VF) const {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getMemInstValueType(I);
    // We have already decided how to vectorize this instruction, get that
    // result.
    if (VF.isVector()) {
      InstWidening WideningDecision = getWideningDecision(I, VF);
      assert(WideningDecision != CM_Unknown &&
             "Widening decision should be ready at this moment");
      return WideningDecision == CM_Scalarize;
    }
    const Align Alignment = getLoadStoreAlignment(I);
    return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
                                isLegalMaskedGather(Ty, Alignment))
                            : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
                                isLegalMaskedScatter(Ty, Alignment));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
    Instruction *I, ElementCount VF) {
  assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  assert(getWideningDecision(I, VF) == CM_Unknown &&
         "Decision should not be set yet.");
  auto *Group = getInterleavedAccessGroup(I);
  assert(Group && "Must have a group.");

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = getMemInstValueType(I);
  if (hasIrregularType(ScalarTy, DL))
    return false;

  // Check if masking is required.
  // A Group may need masking for one of two reasons: it resides in a block that
  // needs predication, or it was decided to use masking to deal with gaps.
  bool PredicatedAccessRequiresMasking =
      Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
  bool AccessWithGapsRequiresMasking =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
    return true;

  // If masked interleaving is required, we expect that the user/target had
  // enabled it, because otherwise it either wouldn't have been created or
  // it should have been invalidated by the CostModel.
  assert(useMaskedInterleavedAccesses(TTI) &&
         "Masked interleave-groups for predicated accesses are not enabled.");

  auto *Ty = getMemInstValueType(I);
  const Align Alignment = getLoadStoreAlignment(I);
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
                          : TTI.isLegalMaskedStore(Ty, Alignment);
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
    Instruction *I, ElementCount VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!Legal->isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. Even if we do not find any uniform value, the
  // entry created here means we will not analyze this VF again;
  // Uniforms.count(VF) will return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of current loop are out of
  // scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Instructions that are scalar with predication must not be considered
  // uniform after vectorization, because that would create an erroneous
  // replicating region where only a single instance out of VF should be
  // formed.
  // TODO: optimize such seldom cases if found important, see PR40816.
  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isOutOfScope(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
                        << *I << "\n");
      return;
    }
    if (isScalarWithPredication(I, VF)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
                        << *I << "\n");
      return;
    }
    LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
    Worklist.insert(I);
  };

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    // A uniform memory op is itself uniform. We exclude uniform stores
    // here as they demand the last lane, not the first one.
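    // For example, a load from a loop-invariant address yields the same value
    // on every iteration, so only lane 0 is needed, whereas a store to an
    // invariant address must keep the value of the final iteration.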
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
  SetVector<Value *> HasUniformUse;

  // Scan the loop for instructions which are either a) known to have only
  // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside Worklist. This ensures a
  // uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist.
  // However, this condition cannot be true for phi nodes that form a cyclic
  // dependence. We must process phi nodes separately. An induction variable
  // will remain uniform if all users of the induction variable and induction
  // variable update remain uniform. The code below handles both pointer and
  // non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

ElementCount
LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
  if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
    reportVectorizationInfo(
        "Disabling scalable vectorization, because target does not "
        "support scalable vectors.",
        "ScalableVectorsUnsupported", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  auto MaxScalableVF = ElementCount::getScalable(
      std::numeric_limits<ElementCount::ScalarTy>::max());

  // Disable scalable vectorization if the loop contains unsupported
  // reductions.
  // Test that the loop-vectorizer can legalize all operations for this MaxVF.
  // FIXME: While for scalable vectors this is currently sufficient, this
  // should be replaced by a more detailed mechanism that filters out specific
  // VFs, instead of invalidating vectorization for a whole set of VFs based
  // on the MaxVF.
  if (!canVectorizeReductions(MaxScalableVF)) {
    reportVectorizationInfo(
        "Scalable vectorization not supported for the reduction "
        "operations found in this loop.",
        "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  if (Legal->isSafeForAnyVectorWidth())
    return MaxScalableVF;

  // Limit MaxScalableVF by the maximum safe dependence distance.
  Optional<unsigned> MaxVScale = TTI.getMaxVScale();
  MaxScalableVF = ElementCount::getScalable(
      MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
  if (!MaxScalableVF)
    reportVectorizationInfo(
        "Max legal vector width too small, scalable vectorization "
        "unfeasible.",
        "ScalableVFUnfeasible", ORE, TheLoop);

  return MaxScalableVF;
}

ElementCount
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
                                                 ElementCount UserVF) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from the
  // memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeElements =
      PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);

  auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
  auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);

  LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
                    << ".\n");
  LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
                    << ".\n");

  // First analyze the UserVF, falling back if the UserVF should be ignored.
  if (UserVF) {
    auto MaxSafeUserVF =
        UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;

    if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF))
      return UserVF;

    assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));

    // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
    // is better to ignore the hint and let the compiler choose a suitable VF.
    if (!UserVF.isScalable()) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe, clamping to max safe VF="
                        << MaxSafeFixedVF << ".\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe, clamping to maximum safe vectorization factor "
               << ore::NV("VectorizationFactor", MaxSafeFixedVF);
      });
      return MaxSafeFixedVF;
    }

    LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                      << " is unsafe. Ignoring scalable UserVF.\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe. Ignoring the hint to let the compiler pick a "
                "suitable VF.";
    });
  }

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");

  ElementCount MaxFixedVF = ElementCount::getFixed(1);
  if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
                                           WidestType, MaxSafeFixedVF))
    MaxFixedVF = MaxVF;

  if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
                                           WidestType, MaxSafeScalableVF))
    // FIXME: Return scalable VF as well (to be added in future patch).
    if (MaxVF.isScalable())
      LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
                        << "\n");

  return MaxFixedVF;
}

Optional<ElementCount>
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to insert the check anyway, since the result is
    // still likely to be dynamically uniform if the target can skip it.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
    if (runtimeChecksRequired())
      return None;

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fallback to a vectorization with a scalar epilogue.
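    // (Illustrative: a tail-folding request typically comes from
    // '#pragma clang loop vectorize_predicate(enable)' or the equivalent
    // -prefer-predicate-over-epilogue option; because this loop is not
    // bottom-tested with a single exiting block, the request cannot be
    // honored and we fall back to a scalar epilogue below.)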
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return None;
  }

  // Now try the tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
  assert(!MaxVF.isScalable() &&
         "Scalable vectors do not yet support tail folding");
  assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
         "MaxVF must be a power of 2");
  unsigned MaxVFtimesIC =
      UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we choose.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
  const SCEV *Rem = SE->getURemExpr(
      SE->applyLoopGuards(ExitCount, TheLoop),
      SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
  if (Rem->isZero()) {
    // Accept MaxVF if we do not have a tail.
    LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
    return MaxVF;
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxVF;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fallback to a vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxVF;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return None;
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return None;
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return None;
}
" 5869 "Enable vectorization of this loop with '#pragma clang loop " 5870 "vectorize(enable)' when compiling with -Os/-Oz", 5871 "NoTailLoopWithOptForSize", ORE, TheLoop); 5872 return None; 5873 } 5874 5875 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5876 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5877 const ElementCount &MaxSafeVF) { 5878 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5879 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5880 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5881 : TargetTransformInfo::RGK_FixedWidthVector); 5882 5883 // Convenience function to return the minimum of two ElementCounts. 5884 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5885 assert((LHS.isScalable() == RHS.isScalable()) && 5886 "Scalable flags must match"); 5887 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5888 }; 5889 5890 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5891 // Note that both WidestRegister and WidestType may not be a powers of 2. 5892 auto MaxVectorElementCount = ElementCount::get( 5893 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5894 ComputeScalableMaxVF); 5895 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5896 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5897 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5898 5899 if (!MaxVectorElementCount) { 5900 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5901 return ElementCount::getFixed(1); 5902 } 5903 5904 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5905 if (ConstTripCount && 5906 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5907 isPowerOf2_32(ConstTripCount)) { 5908 // We need to clamp the VF to be the ConstTripCount. There is no point in 5909 // choosing a higher viable VF as done in the loop below. If 5910 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5911 // the TC is less than or equal to the known number of lanes. 5912 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5913 << ConstTripCount << "\n"); 5914 return TripCountEC; 5915 } 5916 5917 ElementCount MaxVF = MaxVectorElementCount; 5918 if (TTI.shouldMaximizeVectorBandwidth() || 5919 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5920 auto MaxVectorElementCountMaxBW = ElementCount::get( 5921 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5922 ComputeScalableMaxVF); 5923 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5924 5925 // Collect all viable vectorization factors larger than the default MaxVF 5926 // (i.e. MaxVectorElementCount). 5927 SmallVector<ElementCount, 8> VFs; 5928 for (ElementCount VS = MaxVectorElementCount * 2; 5929 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5930 VFs.push_back(VS); 5931 5932 // For each VF calculate its register usage. 5933 auto RUs = calculateRegisterUsage(VFs); 5934 5935 // Select the largest VF which doesn't require more registers than existing 5936 // ones. 
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (ElementCount MinVF =
            TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
      if (ElementCount::isKnownLT(MaxVF, MinVF)) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

bool LoopVectorizationCostModel::isMoreProfitable(
    const VectorizationFactor &A, const VectorizationFactor &B) const {
  InstructionCost::CostType CostA = *A.Cost.getValue();
  InstructionCost::CostType CostB = *B.Cost.getValue();

  // To avoid the need for FP division:
  //     (CostA / A.Width) < (CostB / B.Width)
  // <=> (CostA * B.Width) < (CostB * A.Width)
  return (CostA * B.Width.getKnownMinValue()) <
         (CostB * A.Width.getKnownMinValue());
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  assert(!MaxVF.isScalable() && "scalable vectors not yet supported");

  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");

  const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
  VectorizationFactor ChosenFactor = ScalarCost;

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && MaxVF.isVector()) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max();
  }

  for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
       i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so we
    // need to divide the cost of the vector loop by the width of the vector
    // elements.
    VectorizationCostTy C = expectedCost(i);

    assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
    VectorizationFactor Candidate(i, C.first);
    LLVM_DEBUG(
        dbgs() << "LV: Vector loop of width " << i << " costs: "
               << (*Candidate.Cost.getValue() / Candidate.Width.getFixedValue())
               << ".\n");

    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
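    // (Worked example of the isMoreProfitable() comparison above, using
    // made-up costs: Candidate = {VF=4, Cost=10} vs ScalarCost = {VF=1,
    // Cost=3} compares 10 * 1 < 3 * 4, i.e. 10 < 12, so the candidate wins
    // even though its absolute cost is higher.)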
    if (isMoreProfitable(Candidate, ScalarCost))
      ProfitableVFs.push_back(Candidate);

    if (isMoreProfitable(Candidate, ChosenFactor))
      ChosenFactor = Candidate;
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    ChosenFactor = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
                 *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue())
                 dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
  return ChosenFactor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
        return Legal->isFirstOrderRecurrence(&Phi) ||
               Legal->isReductionVariable(&Phi);
      }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of the penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
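  // (Illustrative: with the cl::opt default for EpilogueVectorizationMinVF,
  // 16 at the time of writing, a main loop VF of 8 is rejected below while a
  // VF of 16 is accepted, provided the target reports a max interleave
  // factor greater than 1. The exact threshold is configurable.)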
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  if (MainLoopVF.isScalable()) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
                         "yet supported.\n");
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    if (LVP.hasPlanWithVFs(
            {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
      return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  if (!isEpilogueVectorizationProfitable(MainLoopVF))
    return Result;

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
        (Result.Width.getFixedValue() == 1 ||
         isMoreProfitable(NextVF, Result)) &&
        LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
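      // (Illustrative: for a loop like 'char s = 0; for (...) s += a[i];',
      // the reduction PHI may have been promoted to i32 by the frontend,
      // while the recurrence type recorded by the reduction analysis is i8;
      // using the recurrence type keeps MinWidth at 8 bits here.)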
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
        if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store will
      //        be vectorized. We only know this for certain after a VF has
      //        been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we
  // can't predict at this level. For example, frontend pressure (on decode
  // or fetch) due to code size, or the number and capabilities of the
  // execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the
  //    cross-iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // The maximum safe dependence distance was already used to limit the VF;
  // interleaving would widen the effective factor further, so do not
  // interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled and the code has scalar reductions (HasReductions && VF == 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want a power-of-two interleave count to simplify
  // any addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case
  // IC is set to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor(
        (TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count so it does not exceed the trip count divided by VF,
  // provided the result is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0) {
    assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
    LoopCost = *expectedCost(VF).first.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2, so
    // the critical path only gets increased by one reduction operation.
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
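    // (Illustrative: with IC = 4, a scalar reduction 's += a[i]' is split
    // into four independent partial sums that are combined after the loop,
    // shortening the cross-iteration dependence chain by a factor of ~4.)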
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to accommodate the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
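        // (Such out-of-loop values, e.g. function arguments or values
        // computed before the loop, stay live across the whole loop body, so
        // they occupy a register regardless of the interleave count.)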
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0U;
    return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
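    // (Illustrative: for the chain 'x = load p; y = x + 1; store y, q', x's
    // interval opens at the load and closes at the add, and y's opens at the
    // add and closes at the store, so at most two values are live at once.)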
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where a previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked load/gather emulation was previously never allowed;
  // emulation of a limited number of masked stores/scatters was.
  assert(isPredicatedInst(I, ElementCount::getFixed(1)) &&
         "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) && NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better to not if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for. This behavior can be changed by allowing getScalarValue to clone
    // the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider expressions
  // that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.find(I) != ScalarCosts.end())
      continue;

    // Compute the cost of the vector instruction. Note that this cost already
    // includes the scalarization overhead of the predicated instruction.
    InstructionCost VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost
    // of the instruction as if it wasn't if-converted and instead remained in
    // the predicated block. We will scale this cost by block probability
    // after computing the scalarization overhead.
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    InstructionCost ScalarCost =
        VF.getKnownMinValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      ScalarCost +=
          VF.getKnownMinValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand can
    // be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(ElementCount VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always
    // execute the predicated block, if it is an if-else block. Thus, scale
    // the block's cost by the probability of executing it.
    // blockNeedsPredication from Legal is used so as to not include all
    // blocks in tail-folded loops.
blockNeedsPredication from 6823 // Legal is used so as to not include all blocks in tail folded loops. 6824 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6825 BlockCost.first /= getReciprocalPredBlockProb(); 6826 6827 Cost.first += BlockCost.first; 6828 Cost.second |= BlockCost.second; 6829 } 6830 6831 return Cost; 6832 } 6833 6834 /// Gets Address Access SCEV after verifying that the access pattern 6835 /// is loop invariant except the induction variable dependence. 6836 /// 6837 /// This SCEV can be sent to the Target in order to estimate the address 6838 /// calculation cost. 6839 static const SCEV *getAddressAccessSCEV( 6840 Value *Ptr, 6841 LoopVectorizationLegality *Legal, 6842 PredicatedScalarEvolution &PSE, 6843 const Loop *TheLoop) { 6844 6845 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6846 if (!Gep) 6847 return nullptr; 6848 6849 // We are looking for a gep with all loop invariant indices except for one 6850 // which should be an induction variable. 6851 auto SE = PSE.getSE(); 6852 unsigned NumOperands = Gep->getNumOperands(); 6853 for (unsigned i = 1; i < NumOperands; ++i) { 6854 Value *Opd = Gep->getOperand(i); 6855 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6856 !Legal->isInductionVariable(Opd)) 6857 return nullptr; 6858 } 6859 6860 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6861 return PSE.getSCEV(Ptr); 6862 } 6863 6864 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6865 return Legal->hasStride(I->getOperand(0)) || 6866 Legal->hasStride(I->getOperand(1)); 6867 } 6868 6869 InstructionCost 6870 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6871 ElementCount VF) { 6872 assert(VF.isVector() && 6873 "Scalarization cost of instruction implies vectorization."); 6874 if (VF.isScalable()) 6875 return InstructionCost::getInvalid(); 6876 6877 Type *ValTy = getMemInstValueType(I); 6878 auto SE = PSE.getSE(); 6879 6880 unsigned AS = getLoadStoreAddressSpace(I); 6881 Value *Ptr = getLoadStorePointerOperand(I); 6882 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6883 6884 // Figure out whether the access is strided and get the stride value 6885 // if it's known in compile time 6886 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6887 6888 // Get the cost of the scalar memory instruction and address computation. 6889 InstructionCost Cost = 6890 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6891 6892 // Don't pass *I here, since it is scalar but will actually be part of a 6893 // vectorized loop where the user of it is a vectorized instruction. 6894 const Align Alignment = getLoadStoreAlignment(I); 6895 Cost += VF.getKnownMinValue() * 6896 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6897 AS, TTI::TCK_RecipThroughput); 6898 6899 // Get the overhead of the extractelement and insertelement instructions 6900 // we might create due to scalarization. 6901 Cost += getScalarizationOverhead(I, VF); 6902 6903 // If we have a predicated load/store, it will need extra i1 extracts and 6904 // conditional branches, but may not be executed for each vector lane. Scale 6905 // the cost by the probability of executing the predicated block. 
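// As a rough illustrative example (the numbers are hypothetical, not taken
// from any target): with VF = 4 and getReciprocalPredBlockProb() == 2, a
// scalarized predicated load whose unscaled per-lane costs summed to 8 would
// first be halved, and the i1-extract and branch overhead added on top:
//
//   Cost = 8 / 2;                                  // probability scaling
//   Cost += 4 /* i1 extracts */ + 1 /* branch */;  // predication overhead
//
// The actual numbers come from TTI and differ per target.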
6906 if (isPredicatedInst(I, ElementCount::getFixed(1))) { 6907 Cost /= getReciprocalPredBlockProb(); 6908 6909 // Add the cost of an i1 extract and a branch 6910 auto *Vec_i1Ty = 6911 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6912 Cost += TTI.getScalarizationOverhead( 6913 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 6914 /*Insert=*/false, /*Extract=*/true); 6915 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6916 6917 if (useEmulatedMaskMemRefHack(I)) 6918 // Artificially setting to a high enough value to practically disable 6919 // vectorization with such operations. 6920 Cost = 3000000; 6921 } 6922 6923 return Cost; 6924 } 6925 6926 InstructionCost 6927 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6928 ElementCount VF) { 6929 Type *ValTy = getMemInstValueType(I); 6930 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6931 Value *Ptr = getLoadStorePointerOperand(I); 6932 unsigned AS = getLoadStoreAddressSpace(I); 6933 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6934 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6935 6936 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6937 "Stride should be 1 or -1 for consecutive memory access"); 6938 const Align Alignment = getLoadStoreAlignment(I); 6939 InstructionCost Cost = 0; 6940 if (Legal->isMaskRequired(I)) 6941 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6942 CostKind); 6943 else 6944 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6945 CostKind, I); 6946 6947 bool Reverse = ConsecutiveStride < 0; 6948 if (Reverse) 6949 Cost += 6950 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6951 return Cost; 6952 } 6953 6954 InstructionCost 6955 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6956 ElementCount VF) { 6957 assert(Legal->isUniformMemOp(*I)); 6958 6959 Type *ValTy = getMemInstValueType(I); 6960 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6961 const Align Alignment = getLoadStoreAlignment(I); 6962 unsigned AS = getLoadStoreAddressSpace(I); 6963 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6964 if (isa<LoadInst>(I)) { 6965 return TTI.getAddressComputationCost(ValTy) + 6966 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6967 CostKind) + 6968 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6969 } 6970 StoreInst *SI = cast<StoreInst>(I); 6971 6972 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6973 return TTI.getAddressComputationCost(ValTy) + 6974 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6975 CostKind) + 6976 (isLoopInvariantStoreValue 6977 ? 
0 6978 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6979 VF.getKnownMinValue() - 1)); 6980 } 6981 6982 InstructionCost 6983 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6984 ElementCount VF) { 6985 Type *ValTy = getMemInstValueType(I); 6986 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6987 const Align Alignment = getLoadStoreAlignment(I); 6988 const Value *Ptr = getLoadStorePointerOperand(I); 6989 6990 return TTI.getAddressComputationCost(VectorTy) + 6991 TTI.getGatherScatterOpCost( 6992 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6993 TargetTransformInfo::TCK_RecipThroughput, I); 6994 } 6995 6996 InstructionCost 6997 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6998 ElementCount VF) { 6999 // TODO: Once we have support for interleaving with scalable vectors 7000 // we can calculate the cost properly here. 7001 if (VF.isScalable()) 7002 return InstructionCost::getInvalid(); 7003 7004 Type *ValTy = getMemInstValueType(I); 7005 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7006 unsigned AS = getLoadStoreAddressSpace(I); 7007 7008 auto Group = getInterleavedAccessGroup(I); 7009 assert(Group && "Failed to get an interleaved access group."); 7010 7011 unsigned InterleaveFactor = Group->getFactor(); 7012 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 7013 7014 // Holds the indices of existing members in an interleaved load group. 7015 // An interleaved store group doesn't need this as it doesn't allow gaps. 7016 SmallVector<unsigned, 4> Indices; 7017 if (isa<LoadInst>(I)) { 7018 for (unsigned i = 0; i < InterleaveFactor; i++) 7019 if (Group->getMember(i)) 7020 Indices.push_back(i); 7021 } 7022 7023 // Calculate the cost of the whole interleaved group. 7024 bool UseMaskForGaps = 7025 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 7026 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 7027 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 7028 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 7029 7030 if (Group->isReverse()) { 7031 // TODO: Add support for reversed masked interleaved access. 7032 assert(!Legal->isMaskRequired(I) && 7033 "Reverse masked interleaved access not supported."); 7034 Cost += 7035 Group->getNumMembers() * 7036 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7037 } 7038 return Cost; 7039 } 7040 7041 InstructionCost LoopVectorizationCostModel::getReductionPatternCost( 7042 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 7043 // Early exit if there are no in-loop reductions. 7044 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 7045 return InstructionCost::getInvalid(); 7046 auto *VectorTy = cast<VectorType>(Ty); 7047 7048 // We are looking for one of the following patterns, and for the minimal 7048 // acceptable cost among them: 7049 // reduce(mul(ext(A), ext(B))) or 7050 // reduce(mul(A, B)) or 7051 // reduce(ext(A)) or 7052 // reduce(A). 7053 // The basic idea is to walk down the use-def tree, finding the root 7054 // reduction instruction in InLoopReductionImmediateChains. From there we find 7055 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 7056 // of the components. If the reduction cost is lower, we return it for the 7057 // reduction instruction and 0 for the other instructions in the pattern. If 7058 // it is not, we return an invalid cost specifying that the original cost 7059 // method should be used.
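// As an illustrative IR sketch (not from this file): for a loop summing
// sign-extended i8 products into an i32, reduce(mul(ext(A), ext(B))) would
// look roughly like
//
//   %a.ext = sext <16 x i8> %a to <16 x i32>
//   %b.ext = sext <16 x i8> %b to <16 x i32>
//   %mul   = mul <16 x i32> %a.ext, %b.ext
//   %sum   = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %mul)
//
// If TTI reports a cheaper extended-add-reduction cost for the whole pattern
// (e.g. via a dot-product style instruction), that cost is attributed to the
// reduction instruction and 0 to the mul/ext members of the pattern.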
7060 Instruction *RetI = I; 7061 if ((RetI->getOpcode() == Instruction::SExt || 7062 RetI->getOpcode() == Instruction::ZExt)) { 7063 if (!RetI->hasOneUser()) 7064 return InstructionCost::getInvalid(); 7065 RetI = RetI->user_back(); 7066 } 7067 if (RetI->getOpcode() == Instruction::Mul && 7068 RetI->user_back()->getOpcode() == Instruction::Add) { 7069 if (!RetI->hasOneUser()) 7070 return InstructionCost::getInvalid(); 7071 RetI = RetI->user_back(); 7072 } 7073 7074 // Test if the found instruction is a reduction, and if not return an invalid 7075 // cost specifying the parent to use the original cost modelling. 7076 if (!InLoopReductionImmediateChains.count(RetI)) 7077 return InstructionCost::getInvalid(); 7078 7079 // Find the reduction this chain is a part of and calculate the basic cost of 7080 // the reduction on its own. 7081 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7082 Instruction *ReductionPhi = LastChain; 7083 while (!isa<PHINode>(ReductionPhi)) 7084 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7085 7086 RecurrenceDescriptor RdxDesc = 7087 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7088 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7089 RdxDesc.getOpcode(), VectorTy, false, CostKind); 7090 7091 // Get the operand that was not the reduction chain and match it to one of the 7092 // patterns, returning the better cost if it is found. 7093 Instruction *RedOp = RetI->getOperand(1) == LastChain 7094 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7095 : dyn_cast<Instruction>(RetI->getOperand(1)); 7096 7097 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7098 7099 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 7100 !TheLoop->isLoopInvariant(RedOp)) { 7101 bool IsUnsigned = isa<ZExtInst>(RedOp); 7102 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7103 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7104 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7105 CostKind); 7106 7107 InstructionCost ExtCost = 7108 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7109 TTI::CastContextHint::None, CostKind, RedOp); 7110 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7111 return I == RetI ? *RedCost.getValue() : 0; 7112 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 7113 Instruction *Mul = RedOp; 7114 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 7115 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 7116 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 7117 Op0->getOpcode() == Op1->getOpcode() && 7118 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7119 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7120 bool IsUnsigned = isa<ZExtInst>(Op0); 7121 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7122 // reduce(mul(ext, ext)) 7123 InstructionCost ExtCost = 7124 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7125 TTI::CastContextHint::None, CostKind, Op0); 7126 InstructionCost MulCost = 7127 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 7128 7129 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7130 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7131 CostKind); 7132 7133 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7134 return I == RetI ? 
*RedCost.getValue() : 0; 7135 } else { 7136 InstructionCost MulCost = 7137 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 7138 7139 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7140 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7141 CostKind); 7142 7143 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7144 return I == RetI ? *RedCost.getValue() : 0; 7145 } 7146 } 7147 7148 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 7149 } 7150 7151 InstructionCost 7152 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7153 ElementCount VF) { 7154 // Calculate scalar cost only. Vectorization cost should be ready at this 7155 // moment. 7156 if (VF.isScalar()) { 7157 Type *ValTy = getMemInstValueType(I); 7158 const Align Alignment = getLoadStoreAlignment(I); 7159 unsigned AS = getLoadStoreAddressSpace(I); 7160 7161 return TTI.getAddressComputationCost(ValTy) + 7162 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7163 TTI::TCK_RecipThroughput, I); 7164 } 7165 return getWideningCost(I, VF); 7166 } 7167 7168 LoopVectorizationCostModel::VectorizationCostTy 7169 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7170 ElementCount VF) { 7171 // If we know that this instruction will remain uniform, check the cost of 7172 // the scalar version. 7173 if (isUniformAfterVectorization(I, VF)) 7174 VF = ElementCount::getFixed(1); 7175 7176 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7177 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7178 7179 // Forced scalars do not have any scalarization overhead. 7180 auto ForcedScalar = ForcedScalars.find(VF); 7181 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7182 auto InstSet = ForcedScalar->second; 7183 if (InstSet.count(I)) 7184 return VectorizationCostTy( 7185 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7186 VF.getKnownMinValue()), 7187 false); 7188 } 7189 7190 Type *VectorTy; 7191 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7192 7193 bool TypeNotScalarized = 7194 VF.isVector() && VectorTy->isVectorTy() && 7195 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7196 return VectorizationCostTy(C, TypeNotScalarized); 7197 } 7198 7199 InstructionCost 7200 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7201 ElementCount VF) const { 7202 7203 if (VF.isScalable()) 7204 return InstructionCost::getInvalid(); 7205 7206 if (VF.isScalar()) 7207 return 0; 7208 7209 InstructionCost Cost = 0; 7210 Type *RetTy = ToVectorTy(I->getType(), VF); 7211 if (!RetTy->isVoidTy() && 7212 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7213 Cost += TTI.getScalarizationOverhead( 7214 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7215 true, false); 7216 7217 // Some targets keep addresses scalar. 7218 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7219 return Cost; 7220 7221 // Some targets support efficient element stores. 7222 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7223 return Cost; 7224 7225 // Collect operands to consider. 7226 CallInst *CI = dyn_cast<CallInst>(I); 7227 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7228 7229 // Skip operands that do not require extraction/scalarization and do not incur 7230 // any overhead. 
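// For example (illustrative only): at VF = 4, a scalarized call with one
// vector operand needs four extractelement instructions to feed its four
// scalar copies, so that operand contributes roughly
//   4 * cost(extractelement <4 x ty>)
// to the returned overhead, while operands skipped by
// filterExtractingOperands below contribute nothing, because they are never
// materialized as vectors in the first place.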
7231 SmallVector<Type *> Tys; 7232 for (auto *V : filterExtractingOperands(Ops, VF)) 7233 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7234 return Cost + TTI.getOperandsScalarizationOverhead( 7235 filterExtractingOperands(Ops, VF), Tys); 7236 } 7237 7238 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7239 if (VF.isScalar()) 7240 return; 7241 NumPredStores = 0; 7242 for (BasicBlock *BB : TheLoop->blocks()) { 7243 // For each instruction in the old loop. 7244 for (Instruction &I : *BB) { 7245 Value *Ptr = getLoadStorePointerOperand(&I); 7246 if (!Ptr) 7247 continue; 7248 7249 // TODO: We should generate better code and update the cost model for 7250 // predicated uniform stores. Today they are treated like any other 7251 // predicated store (see added test cases in 7252 // invariant-store-vectorization.ll). 7253 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7254 NumPredStores++; 7255 7256 if (Legal->isUniformMemOp(I)) { 7257 // TODO: Avoid replicating loads and stores instead of 7258 // relying on instcombine to remove them. 7259 // Load: Scalar load + broadcast 7260 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7261 InstructionCost Cost = getUniformMemOpCost(&I, VF); 7262 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7263 continue; 7264 } 7265 7266 // We assume that widening is the best solution when possible. 7267 if (memoryInstructionCanBeWidened(&I, VF)) { 7268 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7269 int ConsecutiveStride = 7270 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7271 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7272 "Expected consecutive stride."); 7273 InstWidening Decision = 7274 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7275 setWideningDecision(&I, VF, Decision, Cost); 7276 continue; 7277 } 7278 7279 // Choose between Interleaving, Gather/Scatter or Scalarization. 7280 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7281 unsigned NumAccesses = 1; 7282 if (isAccessInterleaved(&I)) { 7283 auto Group = getInterleavedAccessGroup(&I); 7284 assert(Group && "Failed to get an interleaved access group."); 7285 7286 // Make one decision for the whole group. 7287 if (getWideningDecision(&I, VF) != CM_Unknown) 7288 continue; 7289 7290 NumAccesses = Group->getNumMembers(); 7291 if (interleavedAccessCanBeWidened(&I, VF)) 7292 InterleaveCost = getInterleaveGroupCost(&I, VF); 7293 } 7294 7295 InstructionCost GatherScatterCost = 7296 isLegalGatherOrScatter(&I) 7297 ? getGatherScatterCost(&I, VF) * NumAccesses 7298 : InstructionCost::getInvalid(); 7299 7300 InstructionCost ScalarizationCost = 7301 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7302 7303 // Choose the best solution for the current VF, 7304 // write down this decision and use it during vectorization. 7305 InstructionCost Cost; 7306 InstWidening Decision; 7307 if (InterleaveCost <= GatherScatterCost && 7308 InterleaveCost < ScalarizationCost) { 7309 Decision = CM_Interleave; 7310 Cost = InterleaveCost; 7311 } else if (GatherScatterCost < ScalarizationCost) { 7312 Decision = CM_GatherScatter; 7313 Cost = GatherScatterCost; 7314 } else { 7315 assert(!VF.isScalable() && 7316 "We cannot yet scalarize for scalable vectors"); 7317 Decision = CM_Scalarize; 7318 Cost = ScalarizationCost; 7319 } 7320 // If the instruction belongs to an interleave group, the whole group
// receives the same decision. The whole group receives the cost, but 7322 // the cost will actually be assigned to one instruction. 7323 if (auto Group = getInterleavedAccessGroup(&I)) 7324 setWideningDecision(Group, VF, Decision, Cost); 7325 else 7326 setWideningDecision(&I, VF, Decision, Cost); 7327 } 7328 } 7329 7330 // Make sure that any load of an address and any other address computation 7331 // remains scalar unless there is gather/scatter support. This avoids 7332 // inevitable extracts into address registers, and also has the benefit of 7333 // activating LSR more, since that pass can't optimize vectorized 7334 // addresses. 7335 if (TTI.prefersVectorizedAddressing()) 7336 return; 7337 7338 // Start with all scalar pointer uses. 7339 SmallPtrSet<Instruction *, 8> AddrDefs; 7340 for (BasicBlock *BB : TheLoop->blocks()) 7341 for (Instruction &I : *BB) { 7342 Instruction *PtrDef = 7343 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7344 if (PtrDef && TheLoop->contains(PtrDef) && 7345 getWideningDecision(&I, VF) != CM_GatherScatter) 7346 AddrDefs.insert(PtrDef); 7347 } 7348 7349 // Add all instructions used to generate the addresses. 7350 SmallVector<Instruction *, 4> Worklist; 7351 append_range(Worklist, AddrDefs); 7352 while (!Worklist.empty()) { 7353 Instruction *I = Worklist.pop_back_val(); 7354 for (auto &Op : I->operands()) 7355 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7356 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7357 AddrDefs.insert(InstOp).second) 7358 Worklist.push_back(InstOp); 7359 } 7360 7361 for (auto *I : AddrDefs) { 7362 if (isa<LoadInst>(I)) { 7363 // Setting the desired widening decision should ideally be handled by 7364 // the cost functions, but since this involves finding out whether the 7365 // loaded register is involved in an address computation, it is 7366 // instead changed here when we know this is the case. 7367 InstWidening Decision = getWideningDecision(I, VF); 7368 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7369 // Scalarize a widened load of an address. 7370 setWideningDecision( 7371 I, VF, CM_Scalarize, 7372 (VF.getKnownMinValue() * 7373 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7374 else if (auto Group = getInterleavedAccessGroup(I)) { 7375 // Scalarize an interleave group of address loads. 7376 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7377 if (Instruction *Member = Group->getMember(I)) 7378 setWideningDecision( 7379 Member, VF, CM_Scalarize, 7380 (VF.getKnownMinValue() * 7381 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7382 } 7383 } 7384 } else 7385 // Make sure I gets scalarized and a cost estimate without 7386 // scalarization overhead.
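// For instance (illustrative sketch, not from the original): for an address
// computed as
//   %idx = and i64 %i, 255
//   %gep = getelementptr i32, i32* %base, i64 %idx
// both %idx and %gep end up in AddrDefs; neither is a load, so they are
// forced scalar here, avoiding vector-to-scalar extracts on the address path.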
7387 ForcedScalars[VF].insert(I); 7388 } 7389 } 7390 7391 InstructionCost 7392 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7393 Type *&VectorTy) { 7394 Type *RetTy = I->getType(); 7395 if (canTruncateToMinimalBitwidth(I, VF)) 7396 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7397 auto SE = PSE.getSE(); 7398 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7399 7400 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7401 ElementCount VF) -> bool { 7402 if (VF.isScalar()) 7403 return true; 7404 7405 auto Scalarized = InstsToScalarize.find(VF); 7406 assert(Scalarized != InstsToScalarize.end() && 7407 "VF not yet analyzed for scalarization profitability"); 7408 return !Scalarized->second.count(I) && 7409 llvm::all_of(I->users(), [&](User *U) { 7410 auto *UI = cast<Instruction>(U); 7411 return !Scalarized->second.count(UI); 7412 }); 7413 }; 7414 (void) hasSingleCopyAfterVectorization; 7415 7416 if (isScalarAfterVectorization(I, VF)) { 7417 // With the exception of GEPs and PHIs, after scalarization there should 7418 // only be one copy of the instruction generated in the loop. This is 7419 // because the VF is either 1, or any instructions that need scalarizing 7420 // have already been dealt with by the time we get here. As a result, 7421 // we don't have to multiply the instruction cost by VF. 7422 assert(I->getOpcode() == Instruction::GetElementPtr || 7423 I->getOpcode() == Instruction::PHI || 7424 (I->getOpcode() == Instruction::BitCast && 7425 I->getType()->isPointerTy()) || 7426 hasSingleCopyAfterVectorization(I, VF)); 7427 VectorTy = RetTy; 7428 } else 7429 VectorTy = ToVectorTy(RetTy, VF); 7430 7431 // TODO: We need to estimate the cost of intrinsic calls. 7432 switch (I->getOpcode()) { 7433 case Instruction::GetElementPtr: 7434 // We mark this instruction as zero-cost because the cost of GEPs in 7435 // vectorized code depends on whether the corresponding memory instruction 7436 // is scalarized or not. Therefore, we handle GEPs with the memory 7437 // instruction cost. 7438 return 0; 7439 case Instruction::Br: { 7440 // In cases of scalarized and predicated instructions, there will be VF 7441 // predicated blocks in the vectorized loop. Each branch around these 7442 // blocks also requires an extract of its vector compare i1 element. 7443 bool ScalarPredicatedBB = false; 7444 BranchInst *BI = cast<BranchInst>(I); 7445 if (VF.isVector() && BI->isConditional() && 7446 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7447 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7448 ScalarPredicatedBB = true; 7449 7450 if (ScalarPredicatedBB) { 7451 // Return cost for branches around scalarized and predicated blocks. 7452 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7453 auto *Vec_i1Ty = 7454 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7455 return (TTI.getScalarizationOverhead( 7456 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7457 false, true) + 7458 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7459 VF.getKnownMinValue())); 7460 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7461 // The back-edge branch will remain, as will all scalar branches. 7462 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7463 else 7464 // This branch will be eliminated by if-conversion.
7465 return 0; 7466 // Note: We currently assume zero cost for an unconditional branch inside 7467 // a predicated block since it will become a fall-through, although we 7468 // may decide in the future to call TTI for all branches. 7469 } 7470 case Instruction::PHI: { 7471 auto *Phi = cast<PHINode>(I); 7472 7473 // First-order recurrences are replaced by vector shuffles inside the loop. 7474 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7475 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7476 return TTI.getShuffleCost( 7477 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7478 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7479 7480 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7481 // converted into select instructions. We require N - 1 selects per phi 7482 // node, where N is the number of incoming values. 7483 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7484 return (Phi->getNumIncomingValues() - 1) * 7485 TTI.getCmpSelInstrCost( 7486 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7487 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7488 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7489 7490 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7491 } 7492 case Instruction::UDiv: 7493 case Instruction::SDiv: 7494 case Instruction::URem: 7495 case Instruction::SRem: 7496 // If we have a predicated instruction, it may not be executed for each 7497 // vector lane. Get the scalarization cost and scale this amount by the 7498 // probability of executing the predicated block. If the instruction is not 7499 // predicated, we fall through to the next case. 7500 if (VF.isVector() && isScalarWithPredication(I)) { 7501 InstructionCost Cost = 0; 7502 7503 // These instructions have a non-void type, so account for the phi nodes 7504 // that we will create. This cost is likely to be zero. The phi node 7505 // cost, if any, should be scaled by the block probability because it 7506 // models a copy at the end of each predicated block. 7507 Cost += VF.getKnownMinValue() * 7508 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7509 7510 // The cost of the non-predicated instruction. 7511 Cost += VF.getKnownMinValue() * 7512 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7513 7514 // The cost of insertelement and extractelement instructions needed for 7515 // scalarization. 7516 Cost += getScalarizationOverhead(I, VF); 7517 7518 // Scale the cost by the probability of executing the predicated blocks. 7519 // This assumes the predicated block for each vector lane is equally 7520 // likely. 7521 return Cost / getReciprocalPredBlockProb(); 7522 } 7523 LLVM_FALLTHROUGH; 7524 case Instruction::Add: 7525 case Instruction::FAdd: 7526 case Instruction::Sub: 7527 case Instruction::FSub: 7528 case Instruction::Mul: 7529 case Instruction::FMul: 7530 case Instruction::FDiv: 7531 case Instruction::FRem: 7532 case Instruction::Shl: 7533 case Instruction::LShr: 7534 case Instruction::AShr: 7535 case Instruction::And: 7536 case Instruction::Or: 7537 case Instruction::Xor: { 7538 // Since we will replace the stride by 1 the multiplication should go away. 
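// Illustrative IR sketch (assuming the stride has been versioned to 1):
//   %off = mul i64 %i, %stride    ; isStrideMul() matches %off
//   %gep = getelementptr i32, i32* %p, i64 %off
// With %stride specialized to 1 the multiply folds to %i, so costing the
// mul at 0 below matches the code that will actually be emitted.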
7539 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7540 return 0; 7541 7542 // Detect reduction patterns 7543 InstructionCost RedCost; 7544 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7545 .isValid()) 7546 return RedCost; 7547 7548 // Certain instructions can be cheaper to vectorize if they have a constant 7549 // second vector operand. One example of this are shifts on x86. 7550 Value *Op2 = I->getOperand(1); 7551 TargetTransformInfo::OperandValueProperties Op2VP; 7552 TargetTransformInfo::OperandValueKind Op2VK = 7553 TTI.getOperandInfo(Op2, Op2VP); 7554 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7555 Op2VK = TargetTransformInfo::OK_UniformValue; 7556 7557 SmallVector<const Value *, 4> Operands(I->operand_values()); 7558 return TTI.getArithmeticInstrCost( 7559 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7560 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7561 } 7562 case Instruction::FNeg: { 7563 return TTI.getArithmeticInstrCost( 7564 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7565 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7566 TargetTransformInfo::OP_None, I->getOperand(0), I); 7567 } 7568 case Instruction::Select: { 7569 SelectInst *SI = cast<SelectInst>(I); 7570 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7571 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7572 7573 const Value *Op0, *Op1; 7574 using namespace llvm::PatternMatch; 7575 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7576 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7577 // select x, y, false --> x & y 7578 // select x, true, y --> x | y 7579 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7580 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7581 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7582 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7583 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7584 Op1->getType()->getScalarSizeInBits() == 1); 7585 7586 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7587 return TTI.getArithmeticInstrCost( 7588 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7589 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7590 } 7591 7592 Type *CondTy = SI->getCondition()->getType(); 7593 if (!ScalarCond) 7594 CondTy = VectorType::get(CondTy, VF); 7595 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7596 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7597 } 7598 case Instruction::ICmp: 7599 case Instruction::FCmp: { 7600 Type *ValTy = I->getOperand(0)->getType(); 7601 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7602 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7603 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7604 VectorTy = ToVectorTy(ValTy, VF); 7605 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7606 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7607 } 7608 case Instruction::Store: 7609 case Instruction::Load: { 7610 ElementCount Width = VF; 7611 if (Width.isVector()) { 7612 InstWidening Decision = getWideningDecision(I, Width); 7613 assert(Decision != CM_Unknown && 7614 "CM decision should be taken at this point"); 7615 if (Decision == CM_Scalarize) 7616 Width = ElementCount::getFixed(1); 7617 } 7618 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7619 return getMemoryInstructionCost(I, VF); 7620 } 7621 case Instruction::BitCast: 7622 if (I->getType()->isPointerTy()) 7623 return 0; 7624 LLVM_FALLTHROUGH; 7625 case Instruction::ZExt: 7626 case Instruction::SExt: 7627 case Instruction::FPToUI: 7628 case Instruction::FPToSI: 7629 case Instruction::FPExt: 7630 case Instruction::PtrToInt: 7631 case Instruction::IntToPtr: 7632 case Instruction::SIToFP: 7633 case Instruction::UIToFP: 7634 case Instruction::Trunc: 7635 case Instruction::FPTrunc: { 7636 // Computes the CastContextHint from a Load/Store instruction. 7637 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7638 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7639 "Expected a load or a store!"); 7640 7641 if (VF.isScalar() || !TheLoop->contains(I)) 7642 return TTI::CastContextHint::Normal; 7643 7644 switch (getWideningDecision(I, VF)) { 7645 case LoopVectorizationCostModel::CM_GatherScatter: 7646 return TTI::CastContextHint::GatherScatter; 7647 case LoopVectorizationCostModel::CM_Interleave: 7648 return TTI::CastContextHint::Interleave; 7649 case LoopVectorizationCostModel::CM_Scalarize: 7650 case LoopVectorizationCostModel::CM_Widen: 7651 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7652 : TTI::CastContextHint::Normal; 7653 case LoopVectorizationCostModel::CM_Widen_Reverse: 7654 return TTI::CastContextHint::Reversed; 7655 case LoopVectorizationCostModel::CM_Unknown: 7656 llvm_unreachable("Instr did not go through cost modelling?"); 7657 } 7658 7659 llvm_unreachable("Unhandled case!"); 7660 }; 7661 7662 unsigned Opcode = I->getOpcode(); 7663 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7664 // For Trunc, the context is the only user, which must be a StoreInst. 7665 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7666 if (I->hasOneUse()) 7667 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7668 CCH = ComputeCCH(Store); 7669 } 7670 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7671 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7672 Opcode == Instruction::FPExt) { 7673 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7674 CCH = ComputeCCH(Load); 7675 } 7676 7677 // We optimize the truncation of induction variables having constant 7678 // integer steps. The cost of these truncations is the same as the scalar 7679 // operation. 7680 if (isOptimizableIVTruncate(I, VF)) { 7681 auto *Trunc = cast<TruncInst>(I); 7682 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7683 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7684 } 7685 7686 // Detect reduction patterns 7687 InstructionCost RedCost; 7688 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7689 .isValid()) 7690 return RedCost; 7691 7692 Type *SrcScalarTy = I->getOperand(0)->getType(); 7693 Type *SrcVecTy = 7694 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7695 if (canTruncateToMinimalBitwidth(I, VF)) { 7696 // This cast is going to be shrunk. This may remove the cast or it might 7697 // turn it into slightly different cast. For example, if MinBW == 16, 7698 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7699 // 7700 // Calculate the modified src and dest types. 7701 Type *MinVecTy = VectorTy; 7702 if (Opcode == Instruction::Trunc) { 7703 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7704 VectorTy = 7705 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7706 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7707 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7708 VectorTy = 7709 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7710 } 7711 } 7712 7713 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7714 } 7715 case Instruction::Call: { 7716 bool NeedToScalarize; 7717 CallInst *CI = cast<CallInst>(I); 7718 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7719 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7720 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7721 return std::min(CallCost, IntrinsicCost); 7722 } 7723 return CallCost; 7724 } 7725 case Instruction::ExtractValue: 7726 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7727 default: 7728 // This opcode is unknown. Assume that it is the same as 'mul'. 7729 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7730 } // end of switch. 
7731 } 7732 7733 char LoopVectorize::ID = 0; 7734 7735 static const char lv_name[] = "Loop Vectorization"; 7736 7737 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7738 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7739 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7740 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7741 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7742 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7743 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7744 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7745 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7746 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7747 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7748 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7749 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7750 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7751 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7752 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7753 7754 namespace llvm { 7755 7756 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7757 7758 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7759 bool VectorizeOnlyWhenForced) { 7760 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7761 } 7762 7763 } // end namespace llvm 7764 7765 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7766 // Check if the pointer operand of a load or store instruction is 7767 // consecutive. 7768 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7769 return Legal->isConsecutivePtr(Ptr); 7770 return false; 7771 } 7772 7773 void LoopVectorizationCostModel::collectValuesToIgnore() { 7774 // Ignore ephemeral values. 7775 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7776 7777 // Ignore type-promoting instructions we identified during reduction 7778 // detection. 7779 for (auto &Reduction : Legal->getReductionVars()) { 7780 RecurrenceDescriptor &RedDes = Reduction.second; 7781 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7782 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7783 } 7784 // Ignore type-casting instructions we identified during induction 7785 // detection. 7786 for (auto &Induction : Legal->getInductionVars()) { 7787 InductionDescriptor &IndDes = Induction.second; 7788 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7789 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7790 } 7791 } 7792 7793 void LoopVectorizationCostModel::collectInLoopReductions() { 7794 for (auto &Reduction : Legal->getReductionVars()) { 7795 PHINode *Phi = Reduction.first; 7796 RecurrenceDescriptor &RdxDesc = Reduction.second; 7797 7798 // We don't collect reductions that are type promoted (yet). 7799 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7800 continue; 7801 7802 // If the target would prefer this reduction to happen "in-loop", then we 7803 // want to record it as such. 7804 unsigned Opcode = RdxDesc.getOpcode(); 7805 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7806 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7807 TargetTransformInfo::ReductionFlags())) 7808 continue; 7809 7810 // Check that we can correctly put the reductions into the loop, by 7811 // finding the chain of operations that leads from the phi to the loop 7812 // exit value. 
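// For an integer add reduction, the chain computed below is typically the
// phi's update sequence, e.g. (illustrative IR):
//   %sum      = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
//   %sum.next = add i32 %sum, %val
// getReductionOpChain walks from the phi to the update feeding the loop-exit
// value; an empty chain means the reduction stays out of the loop.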
7813 SmallVector<Instruction *, 4> ReductionOperations = 7814 RdxDesc.getReductionOpChain(Phi, TheLoop); 7815 bool InLoop = !ReductionOperations.empty(); 7816 if (InLoop) { 7817 InLoopReductionChains[Phi] = ReductionOperations; 7818 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7819 Instruction *LastChain = Phi; 7820 for (auto *I : ReductionOperations) { 7821 InLoopReductionImmediateChains[I] = LastChain; 7822 LastChain = I; 7823 } 7824 } 7825 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7826 << " reduction for phi: " << *Phi << "\n"); 7827 } 7828 } 7829 7830 // TODO: we could return a pair of values that specify the max VF and 7831 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7832 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment 7833 // doesn't have a cost model that can choose which plan to execute if 7834 // more than one is generated. 7835 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7836 LoopVectorizationCostModel &CM) { 7837 unsigned WidestType; 7838 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7839 return WidestVectorRegBits / WidestType; 7840 } 7841 7842 VectorizationFactor 7843 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7844 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7845 ElementCount VF = UserVF; 7846 // Outer loop handling: outer loops may require CFG and instruction level 7847 // transformations before even evaluating whether vectorization is profitable. 7848 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7849 // the vectorization pipeline. 7850 if (!OrigLoop->isInnermost()) { 7851 // If the user doesn't provide a vectorization factor, determine a 7852 // reasonable one. 7853 if (UserVF.isZero()) { 7854 VF = ElementCount::getFixed(determineVPlanVF( 7855 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7856 .getFixedSize(), 7857 CM)); 7858 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7859 7860 // Make sure we have a VF > 1 for stress testing. 7861 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7862 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7863 << "overriding computed VF.\n"); 7864 VF = ElementCount::getFixed(4); 7865 } 7866 } 7867 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7868 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7869 "VF needs to be a power of two"); 7870 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7871 << "VF " << VF << " to build VPlans.\n"); 7872 buildVPlans(VF, VF); 7873 7874 // For VPlan build stress testing, we bail out after VPlan construction. 7875 if (VPlanBuildStressTest) 7876 return VectorizationFactor::Disabled(); 7877 7878 return {VF, 0 /*Cost*/}; 7879 } 7880 7881 LLVM_DEBUG( 7882 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7883 "VPlan-native path.\n"); 7884 return VectorizationFactor::Disabled(); 7885 } 7886 7887 Optional<VectorizationFactor> 7888 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7889 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7890 Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC); 7891 if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved. 7892 return None; 7893 7894 // Invalidate interleave groups if all blocks of the loop will be predicated.
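// For example (illustrative): when the tail is folded by masking, an
// interleave group such as { A[2*i], A[2*i+1] } would have to become a
// single masked wide load of 2 * VF elements; if the target reports no
// masked-interleaved support, the groups (and every decision derived from
// them) must be dropped instead.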
7895 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 7896 !useMaskedInterleavedAccesses(*TTI)) { 7897 LLVM_DEBUG( 7898 dbgs() 7899 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7900 "which requires masked-interleaved support.\n"); 7901 if (CM.InterleaveInfo.invalidateGroups()) 7902 // Invalidating interleave groups also requires invalidating all decisions 7903 // based on them, which includes widening decisions and uniform and scalar 7904 // values. 7905 CM.invalidateCostModelingDecisions(); 7906 } 7907 7908 ElementCount MaxVF = MaybeMaxVF.getValue(); 7909 assert(MaxVF.isNonZero() && "MaxVF is zero."); 7910 7911 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF); 7912 if (!UserVF.isZero() && 7913 (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) { 7914 // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable 7915 // VFs here; this should be reverted to only use legal UserVFs once the 7916 // loop below supports scalable VFs. 7917 ElementCount VF = UserVFIsLegal ? UserVF : MaxVF; 7918 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max") 7919 << " VF " << VF << ".\n"); 7920 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7921 "VF needs to be a power of two"); 7922 // Collect the instructions (and their associated costs) that will be more 7923 // profitable to scalarize. 7924 CM.selectUserVectorizationFactor(VF); 7925 CM.collectInLoopReductions(); 7926 buildVPlansWithVPRecipes(VF, VF); 7927 LLVM_DEBUG(printPlans(dbgs())); 7928 return {{VF, 0}}; 7929 } 7930 7931 assert(!MaxVF.isScalable() && 7932 "Scalable vectors not yet supported beyond this point"); 7933 7934 for (ElementCount VF = ElementCount::getFixed(1); 7935 ElementCount::isKnownLE(VF, MaxVF); VF *= 2) { 7936 // Collect Uniform and Scalar instructions after vectorization with VF. 7937 CM.collectUniformsAndScalars(VF); 7938 7939 // Collect the instructions (and their associated costs) that will be more 7940 // profitable to scalarize. 7941 if (VF.isVector()) 7942 CM.collectInstsToScalarize(VF); 7943 } 7944 7945 CM.collectInLoopReductions(); 7946 7947 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF); 7948 LLVM_DEBUG(printPlans(dbgs())); 7949 if (MaxVF.isScalar()) 7950 return VectorizationFactor::Disabled(); 7951 7952 // Select the optimal vectorization factor. 7953 auto SelectedVF = CM.selectVectorizationFactor(MaxVF); 7954 7955 // Check if it is profitable to vectorize with runtime checks.
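// Illustrative example (hypothetical numbers): comparing three pointer
// ranges pairwise needs three runtime checks, which is usually cheap; a loop
// needing many dozens of checks would cross one of the thresholds consulted
// below, and vectorization would be abandoned with the remark that memory
// operations cannot safely be reordered.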
7956 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7957 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7958 bool PragmaThresholdReached = 7959 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7960 bool ThresholdReached = 7961 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7962 if ((ThresholdReached && !Hints.allowReordering()) || 7963 PragmaThresholdReached) { 7964 ORE->emit([&]() { 7965 return OptimizationRemarkAnalysisAliasing( 7966 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7967 OrigLoop->getHeader()) 7968 << "loop not vectorized: cannot prove it is safe to reorder " 7969 "memory operations"; 7970 }); 7971 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7972 Hints.emitRemarkWithHints(); 7973 return VectorizationFactor::Disabled(); 7974 } 7975 } 7976 return SelectedVF; 7977 } 7978 7979 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 7980 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 7981 << '\n'); 7982 BestVF = VF; 7983 BestUF = UF; 7984 7985 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 7986 return !Plan->hasVF(VF); 7987 }); 7988 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 7989 } 7990 7991 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 7992 DominatorTree *DT) { 7993 // Perform the actual loop transformation. 7994 7995 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7996 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 7997 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7998 7999 VPTransformState State{ 8000 *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; 8001 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8002 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8003 State.CanonicalIV = ILV.Induction; 8004 8005 ILV.printDebugTracesAtStart(); 8006 8007 //===------------------------------------------------===// 8008 // 8009 // Notice: any optimization or new instruction that go 8010 // into the code below should also be implemented in 8011 // the cost-model. 8012 // 8013 //===------------------------------------------------===// 8014 8015 // 2. Copy and widen instructions from the old loop into the new loop. 8016 VPlans.front()->execute(&State); 8017 8018 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8019 // predication, updating analyses. 
8020 ILV.fixVectorizedLoop(State); 8021 8022 ILV.printDebugTracesAtEnd(); 8023 } 8024 8025 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 8026 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 8027 for (const auto &Plan : VPlans) 8028 if (PrintVPlansInDotFormat) 8029 Plan->printDOT(O); 8030 else 8031 Plan->print(O); 8032 } 8033 #endif 8034 8035 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 8036 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 8037 8038 // We create new control-flow for the vectorized loop, so the original exit 8039 // conditions will be dead after vectorization if they are only used by the 8040 // terminator. 8041 SmallVector<BasicBlock*> ExitingBlocks; 8042 OrigLoop->getExitingBlocks(ExitingBlocks); 8043 for (auto *BB : ExitingBlocks) { 8044 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 8045 if (!Cmp || !Cmp->hasOneUse()) 8046 continue; 8047 8048 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8049 if (!DeadInstructions.insert(Cmp).second) 8050 continue; 8051 8052 // An operand of the icmp is often a dead trunc, used by IndUpdate. 8053 // TODO: we can recurse through operands in general. 8054 for (Value *Op : Cmp->operands()) { 8055 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8056 DeadInstructions.insert(cast<Instruction>(Op)); 8057 } 8058 } 8059 8060 // We create new "steps" for induction variable updates to which the original 8061 // induction variables map. An original update instruction will be dead if 8062 // all its users except the induction variable are dead. 8063 auto *Latch = OrigLoop->getLoopLatch(); 8064 for (auto &Induction : Legal->getInductionVars()) { 8065 PHINode *Ind = Induction.first; 8066 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8067 8068 // If the tail is to be folded by masking, the primary induction variable, 8069 // if it exists, isn't dead: it will be used for masking. Don't kill it. 8070 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8071 continue; 8072 8073 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8074 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8075 })) 8076 DeadInstructions.insert(IndUpdate); 8077 8078 // We also record as "Dead" the type-casting instructions we had identified 8079 // during induction analysis. We don't need any handling for them in the 8080 // vectorized loop because we have proven that, under a proper runtime 8081 // test guarding the vectorized loop, the value of the phi, and the casted 8082 // value of the phi, are the same. The last instruction in this casting chain 8083 // will get its scalar/vector/widened def from the scalar/vector/widened def 8084 // of the respective phi node. Any other casts in the induction def-use chain 8085 // have no other uses outside the phi update chain, and will be ignored. 8086 InductionDescriptor &IndDes = Induction.second; 8087 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8088 DeadInstructions.insert(Casts.begin(), Casts.end()); 8089 } 8090 } 8091 8092 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 8093 8094 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8095 8096 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 8097 Instruction::BinaryOps BinOp) { 8098 // When unrolling and the VF is 1, we only need to add a simple scalar.
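// For example (illustrative): unrolling by UF = 4 with an integer IV calls
// this with StartIdx = 0, 1, 2, 3, producing for each unrolled part
//   part0: %val                        ; 0 * %step is constant-folded away
//   part1: add %val, mul(1, %step)
//   part2: add %val, mul(2, %step)
//   part3: add %val, mul(3, %step)
// i.e. each copy sees the scalar induction advanced by StartIdx steps.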
8099 Type *Ty = Val->getType(); 8100 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8101 8102 if (Ty->isFloatingPointTy()) { 8103 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 8104 8105 // Floating-point operations inherit FMF via the builder's flags. 8106 Value *MulOp = Builder.CreateFMul(C, Step); 8107 return Builder.CreateBinOp(BinOp, Val, MulOp); 8108 } 8109 Constant *C = ConstantInt::get(Ty, StartIdx); 8110 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 8111 } 8112 8113 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8114 SmallVector<Metadata *, 4> MDs; 8115 // Reserve first location for self reference to the LoopID metadata node. 8116 MDs.push_back(nullptr); 8117 bool IsUnrollMetadata = false; 8118 MDNode *LoopID = L->getLoopID(); 8119 if (LoopID) { 8120 // First find existing loop unrolling disable metadata. 8121 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8122 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8123 if (MD) { 8124 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8125 IsUnrollMetadata = 8126 S && S->getString().startswith("llvm.loop.unroll.disable"); 8127 } 8128 MDs.push_back(LoopID->getOperand(i)); 8129 } 8130 } 8131 8132 if (!IsUnrollMetadata) { 8133 // Add runtime unroll disable metadata. 8134 LLVMContext &Context = L->getHeader()->getContext(); 8135 SmallVector<Metadata *, 1> DisableOperands; 8136 DisableOperands.push_back( 8137 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8138 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8139 MDs.push_back(DisableNode); 8140 MDNode *NewLoopID = MDNode::get(Context, MDs); 8141 // Set operand 0 to refer to the loop id itself. 8142 NewLoopID->replaceOperandWith(0, NewLoopID); 8143 L->setLoopID(NewLoopID); 8144 } 8145 } 8146 8147 //===--------------------------------------------------------------------===// 8148 // EpilogueVectorizerMainLoop 8149 //===--------------------------------------------------------------------===// 8150 8151 /// This function is partially responsible for generating the control flow 8152 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8153 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8154 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8155 Loop *Lp = createVectorLoopSkeleton(""); 8156 8157 // Generate the code to check the minimum iteration count of the vector 8158 // epilogue (see below). 8159 EPI.EpilogueIterationCountCheck = 8160 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8161 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8162 8163 // Generate the code to check any assumptions that we've made for SCEV 8164 // expressions. 8165 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8166 8167 // Generate the code that checks at runtime if arrays overlap. We put the 8168 // checks into a separate block to make the more common case of few elements 8169 // faster. 8170 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8171 8172 // Generate the iteration count check for the main loop, *after* the check 8173 // for the epilogue loop, so that the path-length is shorter for the case 8174 // that goes directly through the vector epilogue. The longer-path length for 8175 // the main loop is compensated for, by the gain from vectorizing the larger 8176 // trip count. Note: the branch will get updated later on when we vectorize 8177 // the epilogue. 
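// A simplified sketch of the resulting check order (illustrative; see the
// URL above for the full CFG):
//
//   iter.check:                   too few iterations even for the epilogue
//                                 VF?  -> branch to the scalar loop
//   vector.main.loop.iter.check:  too few iterations for the main VF?
//                                 -> branch to the epilogue path
//   vector.ph:                    fall through into the main vector loop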
8178 EPI.MainLoopIterationCountCheck = 8179 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8180 8181 // Generate the induction variable. 8182 OldInduction = Legal->getPrimaryInduction(); 8183 Type *IdxTy = Legal->getWidestInductionType(); 8184 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8185 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8186 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8187 EPI.VectorTripCount = CountRoundDown; 8188 Induction = 8189 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8190 getDebugLocFromInstOrOperands(OldInduction)); 8191 8192 // Skip induction resume value creation here because they will be created in 8193 // the second pass. If we created them here, they wouldn't be used anyway, 8194 // because the vplan in the second pass still contains the inductions from the 8195 // original loop. 8196 8197 return completeLoopSkeleton(Lp, OrigLoopID); 8198 } 8199 8200 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8201 LLVM_DEBUG({ 8202 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8203 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8204 << ", Main Loop UF:" << EPI.MainLoopUF 8205 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8206 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8207 }); 8208 } 8209 8210 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8211 DEBUG_WITH_TYPE(VerboseDebug, { 8212 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8213 }); 8214 } 8215 8216 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8217 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8218 assert(L && "Expected valid Loop."); 8219 assert(Bypass && "Expected valid bypass basic block."); 8220 unsigned VFactor = 8221 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 8222 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8223 Value *Count = getOrCreateTripCount(L); 8224 // Reuse existing vector loop preheader for TC checks. 8225 // Note that new preheader block is generated for vector loop. 8226 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8227 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8228 8229 // Generate code to check if the loop's trip count is less than VF * UF of the 8230 // main vector loop. 8231 auto P = 8232 Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8233 8234 Value *CheckMinIters = Builder.CreateICmp( 8235 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 8236 "min.iters.check"); 8237 8238 if (!ForEpilogue) 8239 TCCheckBlock->setName("vector.main.loop.iter.check"); 8240 8241 // Create new preheader for vector loop. 8242 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8243 DT, LI, nullptr, "vector.ph"); 8244 8245 if (ForEpilogue) { 8246 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8247 DT->getNode(Bypass)->getIDom()) && 8248 "TC check is expected to dominate Bypass"); 8249 8250 // Update dominator for Bypass & LoopExit. 8251 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8252 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8253 8254 LoopBypassBlocks.push_back(TCCheckBlock); 8255 8256 // Save the trip count so we don't have to regenerate it in the 8257 // vec.epilog.iter.check. This is safe to do because the trip count 8258 // generated here dominates the vector epilog iter check. 
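// Illustratively (IR sketch; %n stands for the count saved below), the
// second pass can then emit just
//   %n.vec.remaining = sub i64 %n, %vec.trip.count
// in vec.epilog.iter.check instead of re-expanding the trip-count SCEV.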
    EPI.TripCount = Count;
  }

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  return TCCheckBlock;
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerEpilogueLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *
EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("vec.epilog.");

  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
  BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
  VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
  LoopVectorPreHeader =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, "vec.epilog.ph");
  emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
                                          VecEpilogueIterationCountCheck);

  // Adjust the control flow taking the state info from the main loop
  // vectorization into account.
  assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
         "expected this to be saved from the previous pass.");
  EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopVectorPreHeader);

  DT->changeImmediateDominator(LoopVectorPreHeader,
                               EPI.MainLoopIterationCountCheck);

  EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  if (EPI.SCEVSafetyCheck)
    EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  if (EPI.MemSafetyCheck)
    EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  DT->changeImmediateDominator(
      VecEpilogueIterationCountCheck,
      VecEpilogueIterationCountCheck->getSinglePredecessor());

  DT->changeImmediateDominator(LoopScalarPreHeader,
                               EPI.EpilogueIterationCountCheck);
  DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);

  // Keep track of bypass blocks, as they feed start values to the induction
  // phis in the scalar loop preheader.
  if (EPI.SCEVSafetyCheck)
    LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  if (EPI.MemSafetyCheck)
    LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);
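  // Illustrative only (not emitted verbatim): assuming an i64 induction, the
  // resume phi has the form
  //   %vec.epilog.resume.val = phi i64
  //       [ %vector.trip.count, %vec.epilog.iter.check ],
  //       [ 0,                  %vector.main.loop.iter.check ]
  // i.e. the epilogue resumes where the main vector loop stopped, or starts
  // at zero if the main vector loop was skipped entirely.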
  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *StartIdx = EPResumeVal;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the remaining trip count is less than VF * UF
  // of the vector epilogue loop.
  auto P =
      Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
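  // Rationale (informal): when a scalar epilogue is required, the compare is
  // ULE rather than ULT. If the remaining count exactly equals VF * UF and
  // the vector epilogue ran, no iterations would be left for the mandatory
  // scalar loop, so in that case the vector epilogue must be bypassed too.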
  Value *CheckMinIters = Builder.CreateICmp(
      P, Count,
      ConstantInt::get(Count->getType(),
                       EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
      "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}
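// Worked example (illustrative): with MinVF = 2 and MaxVF = 16, the first
// iteration starts with SubRange = [2, 17). If some decision taken while
// building the VPlan differs between VF = 2 and VF = 8,
// getDecisionAndClampRange clamps the range to [2, 8), so one VPlan covers
// {2, 4}; the next iteration then builds a VPlan for {8, 16}.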
VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
  if (ECEntryIt != EdgeMaskCache.end())
    return ECEntryIt->second;

  VPValue *SrcMask = createBlockInMask(Src, Plan);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
    return EdgeMaskCache[Edge] = SrcMask;

  // If the source is an exiting block, we know the exit edge is dynamically
  // dead in the vector loop, and thus we don't need to restrict the mask.
  // Avoid adding uses of an otherwise potentially dead instruction.
  if (OrigLoop->isLoopExiting(Src))
    return EdgeMaskCache[Edge] = SrcMask;

  VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
  assert(EdgeMask && "No Edge Mask found for condition");

  if (BI->getSuccessor(0) != Dst)
    EdgeMask = Builder.createNot(EdgeMask);

  if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
    // The condition is 'SrcMask && EdgeMask', which is equivalent to
    // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
    // The select version does not introduce new UB if SrcMask is false and
    // EdgeMask is poison. Using 'and' here would introduce undefined behavior.
    VPValue *False = Plan->getOrAddVPValue(
        ConstantInt::getFalse(BI->getCondition()->getType()));
    EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
  }

  return EdgeMaskCache[Edge] = EdgeMask;
}

VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // Look for cached value.
  BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
  if (BCEntryIt != BlockMaskCache.end())
    return BCEntryIt->second;

  // An all-one mask is modelled as no-mask, following the convention for
  // masked load/store/gather/scatter. Initialize BlockMask to no-mask.
  VPValue *BlockMask = nullptr;

  if (OrigLoop->getHeader() == BB) {
    if (!CM.blockNeedsPredication(BB))
      return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.

    // Create the block in-mask as the first non-phi instruction in the block.
    VPBuilder::InsertPointGuard Guard(Builder);
    auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
    Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);

    // Introduce the early-exit compare IV <= BTC to form the header block
    // mask. This is used instead of IV < TC because TC may wrap, unlike BTC.
    // Start by constructing the desired canonical IV.
    VPValue *IV = nullptr;
    if (Legal->getPrimaryInduction())
      IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
    else {
      auto IVRecipe = new VPWidenCanonicalIVRecipe();
      Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
      IV = IVRecipe->getVPSingleValue();
    }
    VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
    bool TailFolded = !CM.isScalarEpilogueAllowed();

    if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop tripcount
      // as a second argument, we only pass the IV here and extract the
      // tripcount from the transform state where codegen of the VP
      // instructions happens.
      BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
    } else {
      BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
    }
    return BlockMaskCache[BB] = BlockMask;
  }
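  // For illustration only: with VF = 4 and tail folding by masking, the
  // header mask lowers to something along the lines of
  //   %mask = icmp ule <4 x i64> %vec.iv, %broadcast.btc
  // (or a get.active.lane.mask intrinsic when the target prefers one), so the
  // final, partially-full vector iteration runs with excess lanes disabled.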
  // This is the block mask. We OR all incoming edges.
  for (auto *Predecessor : predecessors(BB)) {
    VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
    if (!EdgeMask) // Mask of predecessor is all-one, so mask of block is too.
      return BlockMaskCache[BB] = EdgeMask;

    if (!BlockMask) { // BlockMask still has its initial nullptr value.
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask);
  }

  return BlockMaskCache[BB] = BlockMask;
}

VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
                                                ArrayRef<VPValue *> Operands,
                                                VFRange &Range,
                                                VPlanPtr &Plan) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Must be called with either a load or store");

  auto willWiden = [&](ElementCount VF) -> bool {
    if (VF.isScalar())
      return false;
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    if (Decision == LoopVectorizationCostModel::CM_Interleave)
      return true;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  if (LoadInst *Load = dyn_cast<LoadInst>(I))
    return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);

  StoreInst *Store = cast<StoreInst>(I);
  return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
                                            Mask);
}

VPWidenIntOrFpInductionRecipe *
VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
                                           ArrayRef<VPValue *> Operands) const {
  // Check if this is an integer or fp induction. If so, build the recipe that
  // produces its scalar and vector values.
  InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
  if (II.getKind() == InductionDescriptor::IK_IntInduction ||
      II.getKind() == InductionDescriptor::IK_FpInduction) {
    assert(II.getStartValue() ==
           Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
    const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
    return new VPWidenIntOrFpInductionRecipe(
        Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
  }

  return nullptr;
}
VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
    TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
    VPlan &Plan) const {
  // Optimize the special case where the source is a constant integer
  // induction variable. Notice that we can only optimize the 'trunc' case
  // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
  // (c) other casts depend on pointer size.

  // Determine whether \p K is a truncation based on an induction variable that
  // can be optimized.
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(ElementCount)> {
    return [=](ElementCount VF) -> bool {
      return CM.isOptimizableIVTruncate(K, VF);
    };
  };

  if (LoopVectorizationPlanner::getDecisionAndClampRange(
          isOptimizableIVTruncate(I), Range)) {

    InductionDescriptor II =
        Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
    VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             Start, nullptr, I);
  }
  return nullptr;
}

VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
                                                ArrayRef<VPValue *> Operands,
                                                VPlanPtr &Plan) {
  // If all incoming values are equal, the incoming VPValue can be used
  // directly instead of creating a new VPBlendRecipe.
  VPValue *FirstIncoming = Operands[0];
  if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
        return FirstIncoming == Inc;
      })) {
    return Operands[0];
  }

  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.
  SmallVector<VPValue *, 2> OperandsWithMask;
  unsigned NumIncoming = Phi->getNumIncomingValues();

  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    OperandsWithMask.push_back(Operands[In]);
    if (EdgeMask)
      OperandsWithMask.push_back(EdgeMask);
  }
  return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
}
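// Illustrative example: a two-way phi such as
//   %p = phi i32 [ %a, %then ], [ %b, %else ]
// becomes a blend over (value, edge-mask) pairs, which is later lowered to IR
// selects of the form
//   %p = select i1 %mask.then, i32 %a, i32 %b
// (see VPBlendRecipe::execute further down in this file).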
VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
                                                   ArrayRef<VPValue *> Operands,
                                                   VFRange &Range) const {

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [this, CI](ElementCount VF) {
        return CM.isScalarWithPredication(CI, VF);
      },
      Range);

  if (IsPredicated)
    return nullptr;

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
             ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
             ID == Intrinsic::pseudoprobe ||
             ID == Intrinsic::experimental_noalias_scope_decl))
    return nullptr;

  auto willWiden = [&](ElementCount VF) -> bool {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The UseVectorIntrinsic flag records whether the vectorized version of
    // the instruction uses an intrinsic rather than a plain call, i.e.
    // whether an intrinsic call is cheaper than a library call.
    bool NeedToScalarize = false;
    InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
    InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
    bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
    assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
           "Either the intrinsic cost or vector call cost must be valid");
    return UseVectorIntrinsic || !NeedToScalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
  return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
}

bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
  assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
         !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
  auto WillScalarize = [this, I](ElementCount VF) -> bool {
    return CM.isScalarAfterVectorization(I, VF) ||
           CM.isProfitableToScalarize(I, VF) ||
           CM.isScalarWithPredication(I, VF);
  };
  return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
                                                             Range);
}

VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
                                           ArrayRef<VPValue *> Operands) const {
  auto IsVectorizableOpcode = [](unsigned Opcode) {
    switch (Opcode) {
    case Instruction::Add:
    case Instruction::And:
    case Instruction::AShr:
    case Instruction::BitCast:
    case Instruction::FAdd:
    case Instruction::FCmp:
    case Instruction::FDiv:
    case Instruction::FMul:
    case Instruction::FNeg:
    case Instruction::FPExt:
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::FPTrunc:
    case Instruction::FRem:
    case Instruction::FSub:
    case Instruction::ICmp:
    case Instruction::IntToPtr:
    case Instruction::LShr:
    case Instruction::Mul:
    case Instruction::Or:
    case Instruction::PtrToInt:
    case Instruction::SDiv:
    case Instruction::Select:
    case Instruction::SExt:
    case Instruction::Shl:
    case Instruction::SIToFP:
    case Instruction::SRem:
    case Instruction::Sub:
    case Instruction::Trunc:
    case Instruction::UDiv:
    case Instruction::UIToFP:
    case Instruction::URem:
    case Instruction::Xor:
    case Instruction::ZExt:
      return true;
    }
    return false;
  };

  if (!IsVectorizableOpcode(I->getOpcode()))
    return nullptr;

  // Success: widen this instruction.
  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
}

void VPRecipeBuilder::fixHeaderPhis() {
  BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  for (VPWidenPHIRecipe *R : PhisToFix) {
    auto *PN = cast<PHINode>(R->getUnderlyingValue());
    VPRecipeBase *IncR =
        getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
    R->addOperand(IncR->getVPSingleValue());
  }
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isPredicatedInst(I, VF); }, Range);

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
  assert(VPBB->getSuccessors().empty() &&
         "VPBB has successors when handling predicated replication.");
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ? nullptr
                        : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
  if (PHIRecipe) {
    Plan->removeVPValueFor(Instr);
    Plan->addVPValue(Instr, PHIRecipe);
  }
  auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
  VPBlockUtils::connectBlocks(Pred, Exit);

  return Region;
}
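// Shape of the resulting region (sketch): for e.g. a predicated sdiv, the
// region is a triangle
//   pred.sdiv.entry --(mask true)--> pred.sdiv.if --> pred.sdiv.continue
//        \-----------(mask false)---------------------------^
// with the replicated instruction in the .if block and, for non-void
// instructions, a VPPredInstPHIRecipe merging the result in .continue.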
VPRecipeOrVPValueTy
VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
                                        ArrayRef<VPValue *> Operands,
                                        VFRange &Range, VPlanPtr &Plan) {
  // First, check for specific widening recipes that deal with calls, memory
  // operations, inductions and Phi nodes.
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Operands, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
      return toVPRecipeResult(Recipe);

    if (Legal->isReductionVariable(Phi)) {
      RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
      assert(RdxDesc.getRecurrenceStartValue() ==
             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
      VPValue *StartV = Operands[0];

      // Record the PHI and the incoming value from the backedge, so we can add
      // the incoming value from the backedge after all recipes have been
      // created.
      auto *PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
      PhisToFix.push_back(PhiRecipe);
      recordRecipeOf(cast<Instruction>(
          Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
      return toVPRecipeResult(PhiRecipe);
    }

    return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}
void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const DenseMap<Instruction *, Instruction *> &SinkAfter) {

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }
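  // For illustration: an smax reduction appears in IR as an icmp/select pair
  //   %c = icmp sgt i32 %max, %x
  //   %max.next = select i1 %c, i32 %max, i32 %x
  // Only the select belongs to the reduction chain, but the icmp's recipe is
  // recorded here as well so it can be erased once the chain is rewritten to
  // a VPReductionRecipe.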
  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and
  // add placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }
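  // Illustrative example: the two loads in
  //   ... = A[2*i];  ... = A[2*i + 1];
  // form an interleave group with factor 2. Their individual memory recipes
  // are later replaced by one VPInterleaveRecipe that emits a single wide
  // load plus shuffles to de-interleave the even and odd elements.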
  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
  auto Plan = std::make_unique<VPlan>();
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  Plan->setEntry(VPBB);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      SmallVector<VPValue *, 4> Operands;
      auto *Phi = dyn_cast<PHINode>(Instr);
      if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
        Operands.push_back(Plan->getOrAddVPValue(
            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
      } else {
        auto OpRange = Plan->mapToVPValues(Instr->operands());
        Operands = {OpRange.begin(), OpRange.end()};
      }
      if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
              Instr, Operands, Range, Plan)) {
        // If Instr can be simplified to an existing VPValue, use it.
        if (RecipeOrValue.is<VPValue *>()) {
          Plan->addVPValue(Instr, RecipeOrValue.get<VPValue *>());
          continue;
        }
        // Otherwise, add the new recipe.
        VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB =
          RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  RecipeBuilder.fixHeaderPhis();

  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one, VPBB, reflecting
  // original basic blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);

    // If the target is in a replication region, make sure to move Sink to the
    // block after it, not into the replication region itself.
    if (auto *Region =
            dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
      if (Region->isReplicator()) {
        assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
        VPBasicBlock *NextBlock =
            cast<VPBasicBlock>(Region->getSuccessors().front());
        Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
        continue;
      }
    }

    auto *SinkRegion =
        dyn_cast_or_null<VPRegionBlock>(Sink->getParent()->getParent());
    // Unless the sink source is in a replicate region, sink the recipe
    // directly.
    if (!SinkRegion || !SinkRegion->isReplicator()) {
      Sink->moveAfter(Target);
      continue;
    }

    // If the sink source is in a replicate region, we need to move the whole
    // replicate region, which should only contain a single recipe in the main
    // block.
    assert(Sink->getParent()->size() == 1 &&
           "parent must be a replicator with a single recipe");
    auto *SplitBlock =
        Target->getParent()->splitAt(std::next(Target->getIterator()));

    auto *Pred = SinkRegion->getSinglePredecessor();
    auto *Succ = SinkRegion->getSingleSuccessor();
    VPBlockUtils::disconnectBlocks(Pred, SinkRegion);
    VPBlockUtils::disconnectBlocks(SinkRegion, Succ);
    VPBlockUtils::connectBlocks(Pred, Succ);

    auto *SplitPred = SplitBlock->getSinglePredecessor();

    VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
    VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
    VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
    if (VPBB == SplitPred)
      VPBB = SplitBlock;
  }
  // Interleave memory: for each Interleave Group we marked earlier as relevant
  // for this VPlan, replace the Recipes widening its memory instructions with
  // a single VPInterleaveRecipe at its insertion point.
  for (auto IG : InterleaveGroups) {
    auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
        RecipeBuilder.getRecipe(IG->getInsertPos()));
    SmallVector<VPValue *, 4> StoredValues;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
        StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));

    auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
                                        Recipe->getMask());
    VPIG->insertBefore(Recipe);
    unsigned J = 0;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *Member = IG->getMember(i)) {
        if (!Member->getType()->isVoidTy()) {
          VPValue *OriginalV = Plan->getVPValue(Member);
          Plan->removeVPValueFor(Member);
          Plan->addVPValue(Member, VPIG->getVPValue(J));
          OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
          J++;
        }
        RecipeBuilder.getRecipe(Member)->eraseFromParent();
      }
  }

  // Adjust the recipes for any inloop reductions.
  if (Range.Start.isVector())
    adjustRecipesForInLoopReductions(Plan, RecipeBuilder);

  // Finally, if the tail is folded by masking, introduce selects between the
  // phi and the live-out instruction of each reduction, at the end of the
  // latch.
  if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
    Builder.setInsertPoint(VPBB);
    auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
    for (auto &Reduction : Legal->getReductionVars()) {
      if (CM.isInLoopReduction(Reduction.first))
        continue;
      VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
      VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
      Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
    }
  }

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  ElementCount VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running the transformation to recipes until masked code
    // generation in the VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
                                             Legal->getInductionVars(),
                                             DeadInstructions, *PSE.getSE());
  return Plan;
}
// Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
    VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert(isa<VPWidenRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;
      VPReductionRecipe *RedRecipe = new VPReductionRecipe(
          &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->eraseFromParent();

      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        VPRecipeBase *CompareRecipe =
            RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
        assert(isa<VPWidenRecipe>(CompareRecipe) &&
               "Expected to replace a VPWidenSC");
        assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
               "Expected no remaining users");
        CompareRecipe->eraseFromParent();
      }
      Chain = R;
    }
  }
}
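// Worked example (illustrative): for an in-loop integer sum
//   %sum = phi i32 [ 0, %ph ], [ %sum.next, %latch ]
//   %sum.next = add i32 %sum, %x
// the widened add is replaced by a VPReductionRecipe whose chain operand is
// the scalar %sum and whose vector operand is the widened %x; codegen then
// emits a horizontal reduction of %x's vector followed by a scalar add into
// the chain (see VPReductionRecipe::execute below).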
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
}
#endif

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
                                  *this, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
                                    this, *this, InvariantCond, State);
}

void VPWidenRecipe::execute(VPTransformState &State) {
  State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
}

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
                      *this, State.UF, State.VF, IsPtrLoopInvariant,
                      IsIndexLoopInvariant, State);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
                                   getTruncInst(), getVPValue(0),
                                   getCastValue(), State);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
                                 this, State);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(State.Builder, Phi);
  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // and are essentially undef are taken from In0.
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part);
}
void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
                                      getStoredValues(), getMask());
}

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    RecurKind Kind = RdxDesc->getRecurrenceKind();
    bool IsOrdered = useOrderedReductions(*RdxDesc);
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part);
      VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
      Constant *IdenVec =
          ConstantVector::getSplat(VecTy->getElementCount(), Iden);
      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
                                      PrevInChain);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part);
      NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
      NextInChain =
          createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
                         NewRed, PrevInChain);
    } else if (IsOrdered)
      NextInChain = NewRed;
    else {
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
          PrevInChain);
    }
    State.set(this, NextInChain, Part);
  }
}
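// Note on the masked case above (illustrative): when the tail is folded, the
// vector operand is first blended with the reduction's identity, e.g. for an
// add reduction with VF = 4
//   %safe = select <4 x i1> %mask, <4 x i32> %vec, <4 x i32> zeroinitializer
// so that disabled lanes contribute the identity (0 for add) and do not
// perturb the reduced value.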
void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
    State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
                                    *State.Instance, IsPredicated, State);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF.isVector()) {
      // If we're constructing lane 0, initialize to start from poison.
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}
void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
  unsigned Part = State.Instance->Part;
  if (State.hasVectorValue(getOperand(0), Part)) {
    Value *VectorValue = State.get(getOperand(0), Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    if (State.hasVectorValue(this, Part))
      State.reset(this, VPhi, Part);
    else
      State.set(this, VPhi, Part);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), VPhi, Part);
  } else {
    Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
                     PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Instance))
      State.reset(this, Phi, *State.Instance);
    else
      State.set(this, Phi, *State.Instance);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), Phi, *State.Instance);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
  State.ILV->vectorizeMemoryInstruction(
      &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
      StoredValue, getMask());
}
// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives.
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
  }

  // 3) If set, obey the hints.
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  }

  // 4) If the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
                                       LVL.getLAI()))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));

  // Set the insert point after the last scalarized instruction. This
  // ensures the insertelement sequence will directly follow the scalar
  // definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP = std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}
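// Usage note (informal): State.get(Def, Part) is the per-part vector-value
// lookup used throughout recipe execution. For example, with UF = 2, asking
// for part 1 of a live-in IR constant yields a broadcast (splat) of that
// constant, while a replicated definition is reassembled from its per-lane
// scalars via insertelement, as above.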
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get the user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  // Plan how to best vectorize; return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double-precision
// floating point, there will be a performance penalty from the conversion
// overhead and the change in the vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
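  // As an illustrative sketch (not drawn from a specific test case), C code
  // like:
  //
  //   float F[N];
  //   F[i] = F[i] * 3.0;   // 3.0 is a double constant
  //
  // promotes the multiply to double: the float load is widened via fpext and
  // the result truncated back for the store, so a vector body needs
  // <N x double> arithmetic feeding <N x float> stores, halving the effective
  // vector width.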
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem like it can be correct -- what if the loop
  // is an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  if (!Requirements.canVectorizeFPMath(Hints)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
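  // For instance (an informal example, not tied to a particular target), a
  // factor-2 interleave group such as
  //
  //   for (i = 0; i < n; i++) {
  //     Sum += A[2 * i];       // even elements
  //     Sum += A[2 * i + 1];   // odd elements
  //   }
  //
  // lets the vectorizer emit one wide load plus shufflevectors instead of
  // two strided loads.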
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get the user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize; return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
    LVP.setBestPlan(VF.Width, IC);

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not profitable to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);
      LVP.executePlan(Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *profitable* to vectorize the loop, then
      // do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                          EpilogueVF.Width.getKnownMinValue(),
                                          1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
        LVP.executePlan(MainILV, DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // The second pass vectorizes the epilogue and adjusts the control
        // flow edges from the first pass.
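        // Informal sketch of the resulting structure (the concrete factors
        // below are hypothetical; the cost model picks the real ones):
        //
        //   main vector loop      (e.g. VF = 8, UF = 2)
        //   epilogue vector loop  (e.g. VF = 4, UF = 1) <- created here
        //   scalar remainder loop (fewer than EpilogueVF iterations left)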
        LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);
        LVP.executePlan(EpilogILV, DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);
        LVP.executePlan(LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks about strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt vectorization if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
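  // Informally, loop-simplify form guarantees a preheader, a single backedge,
  // and dedicated exits; a sketch of the resulting IR skeleton:
  //
  //   preheader:
  //     br label %header
  //   header:                                    ; single backedge from latch
  //     ...
  //   latch:
  //     br i1 %cond, label %header, label %exit
  //   exit:                                      ; reached only from the loop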
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Report whether any loop nest in the function was changed.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/DominatorTree analyses with outer
  // loop vectorization. Until this is addressed, mark these analyses as
  // preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for the VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}