//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one (see the illustrative sketch at
// the end of this header).
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
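//
// As a minimal illustration of the wide-iteration transformation (assuming
// VF = 4 and no interleaving; the slice notation below is pseudocode):
//
//   for (i = 0; i < n; ++i)            for (i = 0; i + 3 < n; i += 4)
//     a[i] = b[i] + 42;          =>      a[i..i+3] = b[i..i+3] + 42;
//
// with the leftover iterations executed by the scalar epilogue loop.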
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));
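
// For example, the VPlan-native path can be exercised with opt (an
// illustrative invocation, intended for testing only):
//
//   opt -loop-vectorize -enable-vplan-native-path -S in.ll -o out.ll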
// This flag enables stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable
// the verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
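///
/// For example (an illustrative loop, not real input): if SCEV cannot compute
/// an exact count but the loop carries profile branch weights implying about
/// 300 taken backedges, step 2 returns an estimate of roughly 300; with no
/// profile data and a known maximum trip count of 256, step 3 returns 256.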
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
        BFI(BFI), PSI(PSI) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
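  ///
  /// For example (illustrative): at VF = 4, a scalar call to llvm.sqrt.f32
  /// may be widened to a single call to llvm.sqrt.v4f32 on the widened
  /// operand, or to a vector library routine if the target library provides
  /// one.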
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           Value *StartV, unsigned UF, ElementCount VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start,
                             TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
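  ///
  /// As an illustration (VF = 4; the IR below is schematic): a definition
  /// scalarized into lanes %x0..%x3 is packed on its first vector use as
  ///   %v0 = insertelement <4 x i32> undef, i32 %x0, i32 0
  ///   %v1 = insertelement <4 x i32> %v0, i32 %x1, i32 1
  /// and so on for the remaining lanes.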
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
  }

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
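  ///
  /// For example (illustrative): a sum reduction 's += a[i]' vectorized at
  /// VF = 4 accumulates four partial sums in a vector phi inside the loop;
  /// this phase emits the horizontal combine of the four lanes after the loop
  /// and merges the result with the scalar epilogue's value.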
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing, we only handle real values
  /// defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
696 /// 697 /// \p EntryVal is the value from the original loop that maps to the vector 698 /// phi node and is used to distinguish what is the IV currently being 699 /// processed - original one (if \p EntryVal is a phi corresponding to the 700 /// original IV) or the "newly-created" one based on the proof mentioned above 701 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 702 /// latter case \p EntryVal is a TruncInst and we must not record anything for 703 /// that IV, but it's error-prone to expect callers of this routine to care 704 /// about that, hence this explicit parameter. 705 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID, 706 const Instruction *EntryVal, 707 Value *VectorLoopValue, 708 unsigned Part, 709 unsigned Lane = UINT_MAX); 710 711 /// Generate a shuffle sequence that will reverse the vector Vec. 712 virtual Value *reverseVector(Value *Vec); 713 714 /// Returns (and creates if needed) the original loop trip count. 715 Value *getOrCreateTripCount(Loop *NewLoop); 716 717 /// Returns (and creates if needed) the trip count of the widened loop. 718 Value *getOrCreateVectorTripCount(Loop *NewLoop); 719 720 /// Returns a bitcasted value to the requested vector type. 721 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 722 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 723 const DataLayout &DL); 724 725 /// Emit a bypass check to see if the vector trip count is zero, including if 726 /// it overflows. 727 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 728 729 /// Emit a bypass check to see if all of the SCEV assumptions we've 730 /// had to make are correct. 731 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 732 733 /// Emit bypass checks to check any memory assumptions we may have made. 734 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 735 736 /// Compute the transformed value of Index at offset StartValue using step 737 /// StepValue. 738 /// For integer induction, returns StartValue + Index * StepValue. 739 /// For pointer induction, returns StartValue[Index * StepValue]. 740 /// FIXME: The newly created binary instructions should contain nsw/nuw 741 /// flags, which can be found from the original scalar operations. 742 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 743 const DataLayout &DL, 744 const InductionDescriptor &ID) const; 745 746 /// Emit basic blocks (prefixed with \p Prefix) for the iteration check, 747 /// vector loop preheader, middle block and scalar preheader. Also 748 /// allocate a loop object for the new vector loop and return it. 749 Loop *createVectorLoopSkeleton(StringRef Prefix); 750 751 /// Create new phi nodes for the induction variables to resume iteration count 752 /// in the scalar epilogue, from where the vectorized loop left off (given by 753 /// \p VectorTripCount). 754 /// In cases where the loop skeleton is more complicated (eg. epilogue 755 /// vectorization) and the resume values can come from an additional bypass 756 /// block, the \p AdditionalBypass pair provides information about the bypass 757 /// block and the end value on the edge from bypass to this loop. 
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};
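
// For example (illustrative): with UnrollFactor = 2 the unroller conceptually
// turns
//   for (i = 0; i < n; ++i)        { a[i] = b[i] + 1; }
// into
//   for (i = 0; i + 1 < n; i += 2) { a[i] = b[i] + 1; a[i+1] = b[i+1] + 1; }
// with any leftover iteration handled by the scalar remainder loop.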
/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
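///
/// The control flow produced across the two passes is roughly (an
/// illustrative sketch with MainLoopVF = 16 and EpilogueVF = 8; the block
/// names are schematic):
///
///   iter.check:        too few iterations for VF = 16? branch to the
///                      epilogue's iteration count check
///   main.vector.loop:  executes the largest multiple of 16 iterations
///   vec.epilog.check:  enough remaining iterations for VF = 8?
///   vec.epilog.loop:   vectorized epilogue over the remainder
///   scalar.loop:       final tail iterations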
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC,
                                       ORE, EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationLegality *LVL,
                                 llvm::LoopVectorizationCostModel *CM,
                                 BlockFrequencyInfo *BFI,
                                 ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC,
                                       ORE, EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is a debug location attached to the instruction, use it;
    // otherwise keep using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
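
// For example (illustrative): for Step = 2 and a fixed VF of 4 this folds to
// the constant 8, while for a scalable VF of <vscale x 4> it emits code
// computing vscale * 8.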

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
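///
/// For instance (purely illustrative numbers): if the scalar loop body costs
/// 8 units per iteration while the widened body at VF = 4 costs 10 units per
/// wide iteration, the expected cost is 10/4 = 2.5 units per scalar
/// iteration, and vectorizing at VF = 4 is deemed profitable.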
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is non-zero
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
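  ///
  /// For example (illustrative): a loop streaming two arrays while applying a
  /// loop-invariant splat might report, for its vector register class, one
  /// loop-invariant register and a maximum of three concurrently live vector
  /// values.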
1299 SmallVector<RegisterUsage, 8> 1300 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1301 1302 /// Collect values we want to ignore in the cost model. 1303 void collectValuesToIgnore(); 1304 1305 /// Split reductions into those that happen in the loop, and those that happen 1306 /// outside. In loop reductions are collected into InLoopReductionChains. 1307 void collectInLoopReductions(); 1308 1309 /// \returns The smallest bitwidth each instruction can be represented with. 1310 /// The vector equivalents of these instructions should be truncated to this 1311 /// type. 1312 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1313 return MinBWs; 1314 } 1315 1316 /// \returns True if it is more profitable to scalarize instruction \p I for 1317 /// vectorization factor \p VF. 1318 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1319 assert(VF.isVector() && 1320 "Profitable to scalarize relevant only for VF > 1."); 1321 1322 // Cost model is not run in the VPlan-native path - return conservative 1323 // result until this changes. 1324 if (EnableVPlanNativePath) 1325 return false; 1326 1327 auto Scalars = InstsToScalarize.find(VF); 1328 assert(Scalars != InstsToScalarize.end() && 1329 "VF not yet analyzed for scalarization profitability"); 1330 return Scalars->second.find(I) != Scalars->second.end(); 1331 } 1332 1333 /// Returns true if \p I is known to be uniform after vectorization. 1334 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1335 if (VF.isScalar()) 1336 return true; 1337 1338 // Cost model is not run in the VPlan-native path - return conservative 1339 // result until this changes. 1340 if (EnableVPlanNativePath) 1341 return false; 1342 1343 auto UniformsPerVF = Uniforms.find(VF); 1344 assert(UniformsPerVF != Uniforms.end() && 1345 "VF not yet analyzed for uniformity"); 1346 return UniformsPerVF->second.count(I); 1347 } 1348 1349 /// Returns true if \p I is known to be scalar after vectorization. 1350 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1351 if (VF.isScalar()) 1352 return true; 1353 1354 // Cost model is not run in the VPlan-native path - return conservative 1355 // result until this changes. 1356 if (EnableVPlanNativePath) 1357 return false; 1358 1359 auto ScalarsPerVF = Scalars.find(VF); 1360 assert(ScalarsPerVF != Scalars.end() && 1361 "Scalar values are not calculated for VF"); 1362 return ScalarsPerVF->second.count(I); 1363 } 1364 1365 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1366 /// for vectorization factor \p VF. 1367 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1368 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1369 !isProfitableToScalarize(I, VF) && 1370 !isScalarAfterVectorization(I, VF); 1371 } 1372 1373 /// Decision that was taken during cost calculation for memory instruction. 1374 enum InstWidening { 1375 CM_Unknown, 1376 CM_Widen, // For consecutive accesses with stride +1. 1377 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1378 CM_Interleave, 1379 CM_GatherScatter, 1380 CM_Scalarize 1381 }; 1382 1383 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1384 /// instruction \p I and vector width \p VF. 
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           unsigned Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W, unsigned Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I,
                               ElementCount VF = ElementCount::getFixed(1));

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue() const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  unsigned getVectorIntrinsicCost(CallInst *CI, ElementCount VF);

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available, or it is too
  /// expensive.
  unsigned getVectorCallCost(CallInst *CI, ElementCount VF,
                             bool &NeedToScalarize);

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, a power-of-2 larger
  /// than zero. One is returned if vectorization should best be avoided due
  /// to cost.
  ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
                                    ElementCount UserVF);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(ElementCount VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, ElementCount VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
  /// element).
  unsigned getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  unsigned getScalarizationOverhead(Instruction *I, ElementCount VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not a multiple of the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of loop are to be masked to fold tail of scalar iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
1754 void collectLoopUniforms(ElementCount VF); 1755 1756 /// Collect the instructions that are scalar after vectorization. An 1757 /// instruction is scalar if it is known to be uniform or will be scalarized 1758 /// during vectorization. Non-uniform scalarized instructions will be 1759 /// represented by VF values in the vectorized loop, each corresponding to an 1760 /// iteration of the original scalar loop. 1761 void collectLoopScalars(ElementCount VF); 1762 1763 /// Keeps cost model vectorization decision and cost for instructions. 1764 /// Right now it is used for memory instructions only. 1765 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1766 std::pair<InstWidening, unsigned>>; 1767 1768 DecisionList WideningDecisions; 1769 1770 /// Returns true if \p V is expected to be vectorized and it needs to be 1771 /// extracted. 1772 bool needsExtract(Value *V, ElementCount VF) const { 1773 Instruction *I = dyn_cast<Instruction>(V); 1774 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1775 TheLoop->isLoopInvariant(I)) 1776 return false; 1777 1778 // Assume we can vectorize V (and hence we need extraction) if the 1779 // scalars are not computed yet. This can happen, because it is called 1780 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1781 // the scalars are collected. That should be a safe assumption in most 1782 // cases, because we check if the operands have vectorizable types 1783 // beforehand in LoopVectorizationLegality. 1784 return Scalars.find(VF) == Scalars.end() || 1785 !isScalarAfterVectorization(I, VF); 1786 }; 1787 1788 /// Returns a range containing only operands needing to be extracted. 1789 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1790 ElementCount VF) { 1791 return SmallVector<Value *, 4>(make_filter_range( 1792 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1793 } 1794 1795 /// Determines if we have the infrastructure to vectorize loop \p L and its 1796 /// epilogue, assuming the main loop is vectorized by \p VF. 1797 bool isCandidateForEpilogueVectorization(const Loop &L, 1798 const ElementCount VF) const; 1799 1800 /// Returns true if epilogue vectorization is considered profitable, and 1801 /// false otherwise. 1802 /// \p VF is the vectorization factor chosen for the original loop. 1803 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1804 1805 public: 1806 /// The loop that we evaluate. 1807 Loop *TheLoop; 1808 1809 /// Predicated scalar evolution analysis. 1810 PredicatedScalarEvolution &PSE; 1811 1812 /// Loop Info analysis. 1813 LoopInfo *LI; 1814 1815 /// Vectorization legality. 1816 LoopVectorizationLegality *Legal; 1817 1818 /// Vector target information. 1819 const TargetTransformInfo &TTI; 1820 1821 /// Target Library Info. 1822 const TargetLibraryInfo *TLI; 1823 1824 /// Demanded bits analysis. 1825 DemandedBits *DB; 1826 1827 /// Assumption cache. 1828 AssumptionCache *AC; 1829 1830 /// Interface to emit optimization remarks. 1831 OptimizationRemarkEmitter *ORE; 1832 1833 const Function *TheFunction; 1834 1835 /// Loop Vectorize Hint. 1836 const LoopVectorizeHints *Hints; 1837 1838 /// The interleave access information contains groups of interleaved accesses 1839 /// with the same stride and close to each other. 1840 InterleavedAccessInfo &InterleaveInfo; 1841 1842 /// Values to ignore in the cost model. 1843 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1844 1845 /// Values to ignore in the cost model when VF > 1. 
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};

} // end namespace llvm

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
1924 struct LoopVectorize : public FunctionPass { 1925 /// Pass identification, replacement for typeid 1926 static char ID; 1927 1928 LoopVectorizePass Impl; 1929 1930 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1931 bool VectorizeOnlyWhenForced = false) 1932 : FunctionPass(ID), 1933 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1934 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1935 } 1936 1937 bool runOnFunction(Function &F) override { 1938 if (skipFunction(F)) 1939 return false; 1940 1941 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1942 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1943 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1944 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1945 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1946 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1947 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1948 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1949 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1950 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1951 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1952 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1953 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1954 1955 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1956 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1957 1958 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1959 GetLAA, *ORE, PSI).MadeAnyChange; 1960 } 1961 1962 void getAnalysisUsage(AnalysisUsage &AU) const override { 1963 AU.addRequired<AssumptionCacheTracker>(); 1964 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1965 AU.addRequired<DominatorTreeWrapperPass>(); 1966 AU.addRequired<LoopInfoWrapperPass>(); 1967 AU.addRequired<ScalarEvolutionWrapperPass>(); 1968 AU.addRequired<TargetTransformInfoWrapperPass>(); 1969 AU.addRequired<AAResultsWrapperPass>(); 1970 AU.addRequired<LoopAccessLegacyAnalysis>(); 1971 AU.addRequired<DemandedBitsWrapperPass>(); 1972 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1973 AU.addRequired<InjectTLIMappingsLegacy>(); 1974 1975 // We currently do not preserve loopinfo/dominator analyses with outer loop 1976 // vectorization. Until this is addressed, mark these analyses as preserved 1977 // only for non-VPlan-native path. 1978 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1979 if (!EnableVPlanNativePath) { 1980 AU.addPreserved<LoopInfoWrapperPass>(); 1981 AU.addPreserved<DominatorTreeWrapperPass>(); 1982 } 1983 1984 AU.addPreserved<BasicAAWrapperPass>(); 1985 AU.addPreserved<GlobalsAAWrapperPass>(); 1986 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1987 } 1988 }; 1989 1990 } // end anonymous namespace 1991 1992 //===----------------------------------------------------------------------===// 1993 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1994 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1995 //===----------------------------------------------------------------------===// 1996 1997 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1998 // We need to place the broadcast of invariant variables outside the loop, 1999 // but only if it's proven safe to do so. Else, broadcast will be inside 2000 // vector loop body. 
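  // For illustration only (the value names are an assumption, not emitted by
  // this function verbatim): for VF = 4 and a scalar i32 %x, CreateVectorSplat
  // below typically lowers the broadcast to an insertelement/shufflevector
  // pair, roughly:
  //   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                          <4 x i32> poison, <4 x i32> zeroinitializer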
2001 Instruction *Instr = dyn_cast<Instruction>(V); 2002 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2003 (!Instr || 2004 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2005 // Place the code for broadcasting invariant variables in the new preheader. 2006 IRBuilder<>::InsertPointGuard Guard(Builder); 2007 if (SafeToHoist) 2008 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2009 2010 // Broadcast the scalar into all locations in the vector. 2011 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2012 2013 return Shuf; 2014 } 2015 2016 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2017 const InductionDescriptor &II, Value *Step, Value *Start, 2018 Instruction *EntryVal) { 2019 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2020 "Expected either an induction phi-node or a truncate of it!"); 2021 2022 // Construct the initial value of the vector IV in the vector loop preheader 2023 auto CurrIP = Builder.saveIP(); 2024 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2025 if (isa<TruncInst>(EntryVal)) { 2026 assert(Start->getType()->isIntegerTy() && 2027 "Truncation requires an integer type"); 2028 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2029 Step = Builder.CreateTrunc(Step, TruncType); 2030 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2031 } 2032 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2033 Value *SteppedStart = 2034 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2035 2036 // We create vector phi nodes for both integer and floating-point induction 2037 // variables. Here, we determine the kind of arithmetic we will perform. 2038 Instruction::BinaryOps AddOp; 2039 Instruction::BinaryOps MulOp; 2040 if (Step->getType()->isIntegerTy()) { 2041 AddOp = Instruction::Add; 2042 MulOp = Instruction::Mul; 2043 } else { 2044 AddOp = II.getInductionOpcode(); 2045 MulOp = Instruction::FMul; 2046 } 2047 2048 // Multiply the vectorization factor by the step using integer or 2049 // floating-point arithmetic as appropriate. 2050 Value *ConstVF = 2051 getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue()); 2052 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2053 2054 // Create a vector splat to use in the induction update. 2055 // 2056 // FIXME: If the step is non-constant, we create the vector splat with 2057 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2058 // handle a constant vector splat. 2059 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2060 Value *SplatVF = isa<Constant>(Mul) 2061 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2062 : Builder.CreateVectorSplat(VF, Mul); 2063 Builder.restoreIP(CurrIP); 2064 2065 // We may need to add the step a number of times, depending on the unroll 2066 // factor. The last of those goes into the PHI. 
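  // As a sketch (exact names and constants depend on Start, Step and the
  // induction opcode): for VF = 4, UF = 2 and an integer IV starting at 0
  // with step 1, the code below produces IR shaped roughly like
  //   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %ph ],
  //                                 [ %vec.ind.next, %latch ]
  //   %step.add     = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  //   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>
  // with %vec.ind feeding unroll part 0 and %step.add feeding part 1.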
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);

    LastInduction = cast<Instruction>(addFastMathFlag(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
  // induction update chain itself.
2131 Instruction *CastInst = *Casts.begin(); 2132 if (Lane < UINT_MAX) 2133 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 2134 else 2135 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 2136 } 2137 2138 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2139 TruncInst *Trunc) { 2140 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2141 "Primary induction variable must have an integer type"); 2142 2143 auto II = Legal->getInductionVars().find(IV); 2144 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2145 2146 auto ID = II->second; 2147 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2148 2149 // The value from the original loop to which we are mapping the new induction 2150 // variable. 2151 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2152 2153 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2154 2155 // Generate code for the induction step. Note that induction steps are 2156 // required to be loop-invariant 2157 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2158 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2159 "Induction step should be loop invariant"); 2160 if (PSE.getSE()->isSCEVable(IV->getType())) { 2161 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2162 return Exp.expandCodeFor(Step, Step->getType(), 2163 LoopVectorPreHeader->getTerminator()); 2164 } 2165 return cast<SCEVUnknown>(Step)->getValue(); 2166 }; 2167 2168 // The scalar value to broadcast. This is derived from the canonical 2169 // induction variable. If a truncation type is given, truncate the canonical 2170 // induction variable and step. Otherwise, derive these values from the 2171 // induction descriptor. 2172 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2173 Value *ScalarIV = Induction; 2174 if (IV != OldInduction) { 2175 ScalarIV = IV->getType()->isIntegerTy() 2176 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2177 : Builder.CreateCast(Instruction::SIToFP, Induction, 2178 IV->getType()); 2179 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2180 ScalarIV->setName("offset.idx"); 2181 } 2182 if (Trunc) { 2183 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2184 assert(Step->getType()->isIntegerTy() && 2185 "Truncation requires an integer step"); 2186 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2187 Step = Builder.CreateTrunc(Step, TruncType); 2188 } 2189 return ScalarIV; 2190 }; 2191 2192 // Create the vector values from the scalar IV, in the absence of creating a 2193 // vector IV. 2194 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2195 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2196 for (unsigned Part = 0; Part < UF; ++Part) { 2197 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2198 Value *EntryPart = 2199 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2200 ID.getInductionOpcode()); 2201 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 2202 if (Trunc) 2203 addMetadata(EntryPart, Trunc); 2204 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 2205 } 2206 }; 2207 2208 // Now do the actual transformations, and start with creating the step value. 
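  // In outline, the cases handled below are: a scalar or zero VF only needs
  // the splatted scalar IV; if no user of the IV requires a scalar value we
  // create a true vector phi; otherwise we create per-lane scalar steps,
  // either alongside a vector phi, or with only a splat IV when tail-folding
  // needs one for the predicate.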
2209 Value *Step = CreateStepValue(ID.getStep()); 2210 if (VF.isZero() || VF.isScalar()) { 2211 Value *ScalarIV = CreateScalarIV(Step); 2212 CreateSplatIV(ScalarIV, Step); 2213 return; 2214 } 2215 2216 // Determine if we want a scalar version of the induction variable. This is 2217 // true if the induction variable itself is not widened, or if it has at 2218 // least one user in the loop that is not widened. 2219 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2220 if (!NeedsScalarIV) { 2221 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal); 2222 return; 2223 } 2224 2225 // Try to create a new independent vector induction variable. If we can't 2226 // create the phi node, we will splat the scalar induction variable in each 2227 // loop iteration. 2228 if (!shouldScalarizeInstruction(EntryVal)) { 2229 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal); 2230 Value *ScalarIV = CreateScalarIV(Step); 2231 // Create scalar steps that can be used by instructions we will later 2232 // scalarize. Note that the addition of the scalar steps will not increase 2233 // the number of instructions in the loop in the common case prior to 2234 // InstCombine. We will be trading one vector extract for each scalar step. 2235 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2236 return; 2237 } 2238 2239 // All IV users are scalar instructions, so only emit a scalar IV, not a 2240 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2241 // predicate used by the masked loads/stores. 2242 Value *ScalarIV = CreateScalarIV(Step); 2243 if (!Cost->isScalarEpilogueAllowed()) 2244 CreateSplatIV(ScalarIV, Step); 2245 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2246 } 2247 2248 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2249 Instruction::BinaryOps BinOp) { 2250 // Create and check the types. 2251 auto *ValVTy = cast<FixedVectorType>(Val->getType()); 2252 int VLen = ValVTy->getNumElements(); 2253 2254 Type *STy = Val->getType()->getScalarType(); 2255 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2256 "Induction Step must be an integer or FP"); 2257 assert(Step->getType() == STy && "Step has wrong type"); 2258 2259 SmallVector<Constant *, 8> Indices; 2260 2261 if (STy->isIntegerTy()) { 2262 // Create a vector of consecutive numbers from zero to VF. 2263 for (int i = 0; i < VLen; ++i) 2264 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2265 2266 // Add the consecutive indices to the vector value. 2267 Constant *Cv = ConstantVector::get(Indices); 2268 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2269 Step = Builder.CreateVectorSplat(VLen, Step); 2270 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2271 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2272 // which can be found from the original scalar operations. 2273 Step = Builder.CreateMul(Cv, Step); 2274 return Builder.CreateAdd(Val, Step, "induction"); 2275 } 2276 2277 // Floating point induction. 2278 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2279 "Binary Opcode should be specified for FP induction"); 2280 // Create a vector of consecutive numbers from zero to VF. 2281 for (int i = 0; i < VLen; ++i) 2282 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2283 2284 // Add the consecutive indices to the vector value. 
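  // E.g. for VLen = 4, StartIdx = 0 and an FAdd induction with step %s, the
  // code below builds roughly (value names illustrative):
  //   %mul = fmul fast <4 x float> <float 0.0, float 1.0, float 2.0,
  //                                 float 3.0>, %s.splat
  //   %induction = fadd fast <4 x float> %val, %mul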
2285 Constant *Cv = ConstantVector::get(Indices); 2286 2287 Step = Builder.CreateVectorSplat(VLen, Step); 2288 2289 // Floating point operations had to be 'fast' to enable the induction. 2290 FastMathFlags Flags; 2291 Flags.setFast(); 2292 2293 Value *MulOp = Builder.CreateFMul(Cv, Step); 2294 if (isa<Instruction>(MulOp)) 2295 // Have to check, MulOp may be a constant 2296 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2297 2298 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2299 if (isa<Instruction>(BOp)) 2300 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2301 return BOp; 2302 } 2303 2304 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2305 Instruction *EntryVal, 2306 const InductionDescriptor &ID) { 2307 // We shouldn't have to build scalar steps if we aren't vectorizing. 2308 assert(VF.isVector() && "VF should be greater than one"); 2309 // Get the value type and ensure it and the step have the same integer type. 2310 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2311 assert(ScalarIVTy == Step->getType() && 2312 "Val and Step should have the same type"); 2313 2314 // We build scalar steps for both integer and floating-point induction 2315 // variables. Here, we determine the kind of arithmetic we will perform. 2316 Instruction::BinaryOps AddOp; 2317 Instruction::BinaryOps MulOp; 2318 if (ScalarIVTy->isIntegerTy()) { 2319 AddOp = Instruction::Add; 2320 MulOp = Instruction::Mul; 2321 } else { 2322 AddOp = ID.getInductionOpcode(); 2323 MulOp = Instruction::FMul; 2324 } 2325 2326 // Determine the number of scalars we need to generate for each unroll 2327 // iteration. If EntryVal is uniform, we only need to generate the first 2328 // lane. Otherwise, we generate all VF values. 2329 unsigned Lanes = 2330 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) 2331 ? 1 2332 : VF.getKnownMinValue(); 2333 assert((!VF.isScalable() || Lanes == 1) && 2334 "Should never scalarize a scalable vector"); 2335 // Compute the scalar steps and save the results in VectorLoopValueMap. 2336 for (unsigned Part = 0; Part < UF; ++Part) { 2337 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2338 auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2339 ScalarIVTy->getScalarSizeInBits()); 2340 Value *StartIdx = 2341 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2342 if (ScalarIVTy->isFloatingPointTy()) 2343 StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy); 2344 StartIdx = addFastMathFlag(Builder.CreateBinOp( 2345 AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane))); 2346 // The step returned by `createStepForVF` is a runtime-evaluated value 2347 // when VF is scalable. Otherwise, it should be folded into a Constant. 
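      // Net effect of the computation below (with the FP analogues of
      // add/mul for FP inductions): lane Lane of unroll part Part receives
      // ScalarIV + (Part * VF + Lane) * Step.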
2348 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2349 "Expected StartIdx to be folded to a constant when VF is not " 2350 "scalable"); 2351 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2352 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2353 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2354 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2355 } 2356 } 2357 } 2358 2359 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2360 assert(V != Induction && "The new induction variable should not be used."); 2361 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2362 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2363 2364 // If we have a stride that is replaced by one, do it here. Defer this for 2365 // the VPlan-native path until we start running Legal checks in that path. 2366 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2367 V = ConstantInt::get(V->getType(), 1); 2368 2369 // If we have a vector mapped to this value, return it. 2370 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2371 return VectorLoopValueMap.getVectorValue(V, Part); 2372 2373 // If the value has not been vectorized, check if it has been scalarized 2374 // instead. If it has been scalarized, and we actually need the value in 2375 // vector form, we will construct the vector values on demand. 2376 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2377 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2378 2379 // If we've scalarized a value, that value should be an instruction. 2380 auto *I = cast<Instruction>(V); 2381 2382 // If we aren't vectorizing, we can just copy the scalar map values over to 2383 // the vector map. 2384 if (VF.isScalar()) { 2385 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2386 return ScalarValue; 2387 } 2388 2389 // Get the last scalar instruction we generated for V and Part. If the value 2390 // is known to be uniform after vectorization, this corresponds to lane zero 2391 // of the Part unroll iteration. Otherwise, the last instruction is the one 2392 // we created for the last vector lane of the Part unroll iteration. 2393 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) 2394 ? 0 2395 : VF.getKnownMinValue() - 1; 2396 assert((!VF.isScalable() || LastLane == 0) && 2397 "Scalable vectorization can't lead to any scalarized values."); 2398 auto *LastInst = cast<Instruction>( 2399 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2400 2401 // Set the insert point after the last scalarized instruction. This ensures 2402 // the insertelement sequence will directly follow the scalar definitions. 2403 auto OldIP = Builder.saveIP(); 2404 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2405 Builder.SetInsertPoint(&*NewIP); 2406 2407 // However, if we are vectorizing, we need to construct the vector values. 2408 // If the value is known to be uniform after vectorization, we can just 2409 // broadcast the scalar value corresponding to lane zero for each unroll 2410 // iteration. Otherwise, we construct the vector values using insertelement 2411 // instructions. Since the resulting vectors are stored in 2412 // VectorLoopValueMap, we will only generate the insertelements once. 
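    // As an illustration for VF = 4 (value names hypothetical), packing a
    // non-uniform value emits an insertelement chain:
    //   %pack.0 = insertelement <4 x i32> poison, i32 %s0, i32 0
    //   %pack.1 = insertelement <4 x i32> %pack.0, i32 %s1, i32 1
    //   ...and so on for lanes 2 and 3.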
2413 Value *VectorValue = nullptr; 2414 if (Cost->isUniformAfterVectorization(I, VF)) { 2415 VectorValue = getBroadcastInstrs(ScalarValue); 2416 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2417 } else { 2418 // Initialize packing with insertelements to start from poison. 2419 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2420 Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF)); 2421 VectorLoopValueMap.setVectorValue(V, Part, Poison); 2422 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 2423 packScalarIntoVectorValue(V, {Part, Lane}); 2424 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2425 } 2426 Builder.restoreIP(OldIP); 2427 return VectorValue; 2428 } 2429 2430 // If this scalar is unknown, assume that it is a constant or that it is 2431 // loop invariant. Broadcast V and save the value for future uses. 2432 Value *B = getBroadcastInstrs(V); 2433 VectorLoopValueMap.setVectorValue(V, Part, B); 2434 return B; 2435 } 2436 2437 Value * 2438 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2439 const VPIteration &Instance) { 2440 // If the value is not an instruction contained in the loop, it should 2441 // already be scalar. 2442 if (OrigLoop->isLoopInvariant(V)) 2443 return V; 2444 2445 assert(Instance.Lane > 0 2446 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2447 : true && "Uniform values only have lane zero"); 2448 2449 // If the value from the original loop has not been vectorized, it is 2450 // represented by UF x VF scalar values in the new loop. Return the requested 2451 // scalar value. 2452 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2453 return VectorLoopValueMap.getScalarValue(V, Instance); 2454 2455 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2456 // for the given unroll part. If this entry is not a vector type (i.e., the 2457 // vectorization factor is one), there is no need to generate an 2458 // extractelement instruction. 2459 auto *U = getOrCreateVectorValue(V, Instance.Part); 2460 if (!U->getType()->isVectorTy()) { 2461 assert(VF.isScalar() && "Value not scalarized has non-vector type"); 2462 return U; 2463 } 2464 2465 // Otherwise, the value from the original loop has been vectorized and is 2466 // represented by UF vector values. Extract and return the requested scalar 2467 // value from the appropriate vector lane. 
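  // E.g. requesting {Part, Lane} = {0, 2} of a value widened to <4 x i32>
  // yields roughly (names illustrative):
  //   %s = extractelement <4 x i32> %v.part0, i32 2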
2468 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2469 } 2470 2471 void InnerLoopVectorizer::packScalarIntoVectorValue( 2472 Value *V, const VPIteration &Instance) { 2473 assert(V != Induction && "The new induction variable should not be used."); 2474 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2475 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2476 2477 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2478 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2479 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2480 Builder.getInt32(Instance.Lane)); 2481 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2482 } 2483 2484 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2485 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2486 assert(!VF.isScalable() && "Cannot reverse scalable vectors"); 2487 SmallVector<int, 8> ShuffleMask; 2488 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 2489 ShuffleMask.push_back(VF.getKnownMinValue() - i - 1); 2490 2491 return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse"); 2492 } 2493 2494 // Return whether we allow using masked interleave-groups (for dealing with 2495 // strided loads/stores that reside in predicated blocks, or for dealing 2496 // with gaps). 2497 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2498 // If an override option has been passed in for interleaved accesses, use it. 2499 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2500 return EnableMaskedInterleavedMemAccesses; 2501 2502 return TTI.enableMaskedInterleavedAccessVectorization(); 2503 } 2504 2505 // Try to vectorize the interleave group that \p Instr belongs to. 2506 // 2507 // E.g. Translate following interleaved load group (factor = 3): 2508 // for (i = 0; i < N; i+=3) { 2509 // R = Pic[i]; // Member of index 0 2510 // G = Pic[i+1]; // Member of index 1 2511 // B = Pic[i+2]; // Member of index 2 2512 // ... // do something to R, G, B 2513 // } 2514 // To: 2515 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2516 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2517 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2518 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2519 // 2520 // Or translate following interleaved store group (factor = 3): 2521 // for (i = 0; i < N; i+=3) { 2522 // ... do something to R, G, B 2523 // Pic[i] = R; // Member of index 0 2524 // Pic[i+1] = G; // Member of index 1 2525 // Pic[i+2] = B; // Member of index 2 2526 // } 2527 // To: 2528 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2529 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2530 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2531 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2532 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2533 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2534 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2535 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2536 VPValue *BlockInMask) { 2537 Instruction *Instr = Group->getInsertPos(); 2538 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2539 2540 // Prepare for the vector type of the interleaved load/store. 
  Type *ScalarTy = getMemInstValueType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  assert(!VF.isScalable() &&
         "scalable vector reverse operation is not implemented");
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, {Part, 0});
    setDebugLocFromInst(Builder, AddrPart);

    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address to the member of index 0.
    //
    // E.g. a = A[i+1];  // Member of index 1 (Current instruction)
    //      b = A[i];    // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a;  // Member of index 1
    //      A[i]   = b;  // Member of index 0
    //      A[i+2] = c;  // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
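    // For a factor-3 group at VF = 4 with a live block mask, the group mask
    // below is built by replicating each lane's predicate across the group
    // members, roughly (names illustrative):
    //   %interleaved.mask = shufflevector <4 x i1> %block.mask,
    //       <4 x i1> poison,
    //       <12 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1,
    //                   i32 2, i32 2, i32 2, i32 3, i32 3, i32 3>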
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = reverseVector(StridedVec);

        State.set(VPDefs[J], Member, StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The sub vector type for the current instruction.
  assert(!VF.isScalable() && "VF is assumed to be non scalable.");
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
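    // E.g. for VF = 4 and factor = 3, createInterleaveMask produces
    // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>, which is exactly the R,G,B
    // interleaving shown in the store example above.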
2699 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2700 Value *IVec = Builder.CreateShuffleVector( 2701 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2702 "interleaved.vec"); 2703 2704 Instruction *NewStoreInstr; 2705 if (BlockInMask) { 2706 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2707 Value *ShuffledMask = Builder.CreateShuffleVector( 2708 BlockInMaskPart, 2709 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2710 "interleaved.mask"); 2711 NewStoreInstr = Builder.CreateMaskedStore( 2712 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2713 } 2714 else 2715 NewStoreInstr = 2716 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2717 2718 Group->addMetadata(NewStoreInstr); 2719 } 2720 } 2721 2722 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2723 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2724 VPValue *StoredValue, VPValue *BlockInMask) { 2725 // Attempt to issue a wide load. 2726 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2727 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2728 2729 assert((LI || SI) && "Invalid Load/Store instruction"); 2730 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2731 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2732 2733 LoopVectorizationCostModel::InstWidening Decision = 2734 Cost->getWideningDecision(Instr, VF); 2735 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2736 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2737 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2738 "CM decision is not to widen the memory instruction"); 2739 2740 Type *ScalarDataTy = getMemInstValueType(Instr); 2741 2742 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2743 const Align Alignment = getLoadStoreAlignment(Instr); 2744 2745 // Determine if the pointer operand of the access is either consecutive or 2746 // reverse consecutive. 2747 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2748 bool ConsecutiveStride = 2749 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2750 bool CreateGatherScatter = 2751 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2752 2753 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2754 // gather/scatter. Otherwise Decision should have been to Scalarize. 2755 assert((ConsecutiveStride || CreateGatherScatter) && 2756 "The instruction should be scalarized"); 2757 (void)ConsecutiveStride; 2758 2759 VectorParts BlockInMaskParts(UF); 2760 bool isMaskRequired = BlockInMask; 2761 if (isMaskRequired) 2762 for (unsigned Part = 0; Part < UF; ++Part) 2763 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2764 2765 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2766 // Calculate the pointer for the specific unroll-part. 2767 GetElementPtrInst *PartPtr = nullptr; 2768 2769 bool InBounds = false; 2770 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2771 InBounds = gep->isInBounds(); 2772 2773 if (Reverse) { 2774 assert(!VF.isScalable() && 2775 "Reversing vectors is not yet supported for scalable vectors."); 2776 2777 // If the address is consecutive but reversed, then the 2778 // wide store needs to start at the last vector element. 
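      // E.g. for VF = 4 and Part = 1, the two GEPs below first step back
      // Part * VF = 4 elements and then VF - 1 = 3 more, so the wide access
      // covers elements -7 .. -4 relative to Ptr and is consumed in reverse.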
2779 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2780 ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue()))); 2781 PartPtr->setIsInBounds(InBounds); 2782 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2783 ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue()))); 2784 PartPtr->setIsInBounds(InBounds); 2785 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2786 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2787 } else { 2788 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2789 PartPtr = cast<GetElementPtrInst>( 2790 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2791 PartPtr->setIsInBounds(InBounds); 2792 } 2793 2794 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2795 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2796 }; 2797 2798 // Handle Stores: 2799 if (SI) { 2800 setDebugLocFromInst(Builder, SI); 2801 2802 for (unsigned Part = 0; Part < UF; ++Part) { 2803 Instruction *NewSI = nullptr; 2804 Value *StoredVal = State.get(StoredValue, Part); 2805 if (CreateGatherScatter) { 2806 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2807 Value *VectorGep = State.get(Addr, Part); 2808 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2809 MaskPart); 2810 } else { 2811 if (Reverse) { 2812 // If we store to reverse consecutive memory locations, then we need 2813 // to reverse the order of elements in the stored value. 2814 StoredVal = reverseVector(StoredVal); 2815 // We don't want to update the value in the map as it might be used in 2816 // another expression. So don't call resetVectorValue(StoredVal). 2817 } 2818 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2819 if (isMaskRequired) 2820 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2821 BlockInMaskParts[Part]); 2822 else 2823 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2824 } 2825 addMetadata(NewSI, SI); 2826 } 2827 return; 2828 } 2829 2830 // Handle loads. 2831 assert(LI && "Must have a load instruction"); 2832 setDebugLocFromInst(Builder, LI); 2833 for (unsigned Part = 0; Part < UF; ++Part) { 2834 Value *NewLI; 2835 if (CreateGatherScatter) { 2836 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2837 Value *VectorGep = State.get(Addr, Part); 2838 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2839 nullptr, "wide.masked.gather"); 2840 addMetadata(NewLI, LI); 2841 } else { 2842 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2843 if (isMaskRequired) 2844 NewLI = Builder.CreateMaskedLoad( 2845 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), 2846 "wide.masked.load"); 2847 else 2848 NewLI = 2849 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2850 2851 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2852 addMetadata(NewLI, LI); 2853 if (Reverse) 2854 NewLI = reverseVector(NewLI); 2855 } 2856 2857 State.set(Def, Instr, NewLI, Part); 2858 } 2859 } 2860 2861 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User, 2862 const VPIteration &Instance, 2863 bool IfPredicateInstr, 2864 VPTransformState &State) { 2865 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2866 2867 setDebugLocFromInst(Builder, Instr); 2868 2869 // Does this instruction return a value ? 
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
  for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
    auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
    auto InputInstance = Instance;
    if (!Operand || !OrigLoop->contains(Operand) ||
        (Cost->isUniformAfterVectorization(Operand, State.VF)))
      InputInstance.Lane = 0;
    auto *NewOp = State.get(User.getOperand(op), InputInstance);
    Cloned->setOperand(op, NewOp);
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  // TODO: Set result for VPValue of VPReplicateRecipe. This requires
  // representing scalar values in VPTransformState. Add the cloned scalar to
  // the scalar map entry.
  VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
    if (II->getIntrinsicID() == Intrinsic::assume)
      AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(Builder, OldInst);
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(Builder, OldInst);

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  assert(L && "Create Trip Count for null loop.");
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare.
The only way that we get a backedge taken count is that the 2957 // induction variable was signed and as such will not overflow. In such a case 2958 // truncation is legal. 2959 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2960 IdxTy->getPrimitiveSizeInBits()) 2961 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2962 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2963 2964 // Get the total trip count from the count by adding 1. 2965 const SCEV *ExitCount = SE->getAddExpr( 2966 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2967 2968 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2969 2970 // Expand the trip count and place the new instructions in the preheader. 2971 // Notice that the pre-header does not change, only the loop body. 2972 SCEVExpander Exp(*SE, DL, "induction"); 2973 2974 // Count holds the overall loop count (N). 2975 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2976 L->getLoopPreheader()->getTerminator()); 2977 2978 if (TripCount->getType()->isPointerTy()) 2979 TripCount = 2980 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2981 L->getLoopPreheader()->getTerminator()); 2982 2983 return TripCount; 2984 } 2985 2986 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2987 if (VectorTripCount) 2988 return VectorTripCount; 2989 2990 Value *TC = getOrCreateTripCount(L); 2991 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2992 2993 Type *Ty = TC->getType(); 2994 // This is where we can make the step a runtime constant. 2995 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 2996 2997 // If the tail is to be folded by masking, round the number of iterations N 2998 // up to a multiple of Step instead of rounding down. This is done by first 2999 // adding Step-1 and then rounding down. Note that it's ok if this addition 3000 // overflows: the vector induction variable will eventually wrap to zero given 3001 // that it starts at zero and its Step is a power of two; the loop will then 3002 // exit, with the last early-exit vector comparison also producing all-true. 3003 if (Cost->foldTailByMasking()) { 3004 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3005 "VF*UF must be a power of 2 when folding tail by masking"); 3006 assert(!VF.isScalable() && 3007 "Tail folding not yet supported for scalable vectors"); 3008 TC = Builder.CreateAdd( 3009 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3010 } 3011 3012 // Now we need to generate the expression for the part of the loop that the 3013 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3014 // iterations are not required for correctness, or N - Step, otherwise. Step 3015 // is equal to the vectorization factor (number of SIMD elements) times the 3016 // unroll factor (number of SIMD instructions). 3017 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3018 3019 // There are two cases where we need to ensure (at least) the last iteration 3020 // runs in the scalar remainder loop. Thus, if the step evenly divides 3021 // the trip count, we set the remainder to be equal to the step. If the step 3022 // does not evenly divide the trip count, no adjustment is necessary since 3023 // there will already be scalar iterations. Note that the minimum iterations 3024 // check ensures that N >= Step. 
The cases are: 3025 // 1) If there is a non-reversed interleaved group that may speculatively 3026 // access memory out-of-bounds. 3027 // 2) If any instruction may follow a conditionally taken exit. That is, if 3028 // the loop contains multiple exiting blocks, or a single exiting block 3029 // which is not the latch. 3030 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 3031 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3032 R = Builder.CreateSelect(IsZero, Step, R); 3033 } 3034 3035 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3036 3037 return VectorTripCount; 3038 } 3039 3040 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3041 const DataLayout &DL) { 3042 // Verify that V is a vector type with same number of elements as DstVTy. 3043 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3044 unsigned VF = DstFVTy->getNumElements(); 3045 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3046 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3047 Type *SrcElemTy = SrcVecTy->getElementType(); 3048 Type *DstElemTy = DstFVTy->getElementType(); 3049 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3050 "Vector elements must have same size"); 3051 3052 // Do a direct cast if element types are castable. 3053 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3054 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3055 } 3056 // V cannot be directly casted to desired vector type. 3057 // May happen when V is a floating point vector but DstVTy is a vector of 3058 // pointers or vice-versa. Handle this using a two-step bitcast using an 3059 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3060 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3061 "Only one type should be a pointer type"); 3062 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3063 "Only one type should be a floating point type"); 3064 Type *IntTy = 3065 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3066 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3067 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3068 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3069 } 3070 3071 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3072 BasicBlock *Bypass) { 3073 Value *Count = getOrCreateTripCount(L); 3074 // Reuse existing vector loop preheader for TC checks. 3075 // Note that new preheader block is generated for vector loop. 3076 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3077 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3078 3079 // Generate code to check if the loop's trip count is less than VF * UF, or 3080 // equal to it in case a scalar epilogue is required; this implies that the 3081 // vector trip count is zero. This check also covers the case where adding one 3082 // to the backedge-taken count overflowed leading to an incorrect trip count 3083 // of zero. In this case we will also jump to the scalar loop. 3084 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3085 : ICmpInst::ICMP_ULT; 3086 3087 // If tail is to be folded, vector loop takes care of all iterations. 
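  // Otherwise, e.g. for VF = 4 and UF = 2 with a required scalar epilogue,
  // the check below is (value names illustrative):
  //   %min.iters.check = icmp ule i64 %count, 8
  // and the branch created further down bypasses the vector loop when the
  // check is true.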
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    Value *Step =
        createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  }
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update dominator for Bypass & LoopExit.
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  // Reuse existing vector loop preheader for SCEV checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck = Exp.expandCodeForPredicate(
      &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  SCEVCheckBlock->setName("vector.scevcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
                 nullptr, "vector.ph");

  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  ReplaceInstWithInst(
      SCEVCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  // VPlan-native path does not do any analysis for runtime checks currently.
  if (EnableVPlanNativePath)
    return;

  // Reuse existing vector loop preheader for runtime memory checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const MemCheckBlock = L->getLoopPreheader();

  // Generate the code that checks at runtime whether the arrays overlap. We
  // put the checks into a separate block to make the more common case of few
  // elements faster.
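  // E.g. for two pointer groups %a and %b the emitted predicate typically has
  // the form (a.start < b.end) && (b.start < a.end), i.e. the vector loop is
  // bypassed when the accessed ranges may overlap.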
  auto *LAI = Legal->getLAI();
  const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
  if (!RtPtrChecking.Need)
    return;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  MemCheckBlock->setName("vector.memcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  // Update dominator only if this is the first RT check; this must happen
  // before MemCheckBlock is added to LoopBypassBlocks below.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, MemCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
  }

  auto *CondBranch = cast<BranchInst>(
      Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader));
  ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch);
  LoopBypassBlocks.push_back(MemCheckBlock);
  AddedSafetyChecks = true;

  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
                       RtPtrChecking.getChecks(), RtPtrChecking.getSE());
  assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
                            "claimed checks are required");
  CondBranch->setCondition(MemRuntimeCheck);

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID) const {

  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType() == Step->getType() &&
         "Index type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
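  // The helpers below fold trivial identities directly, e.g. CreateAdd(0, X)
  // and CreateMul(1, X) both simply return X, instead of consulting SCEV.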
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (=LoopVectorBody), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that the
  // expanded instructions dominate all their uses.
  auto GetInsertPoint = [this, &B]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
      return LoopVectorBody->getTerminator();
    return &*B.GetInsertPoint();
  };
  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        StartValue->getType()->getPointerElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();

    // Floating point operations had to be 'fast' to enable the induction.
    FastMathFlags Flags;
    Flags.setFast();

    Value *MulExp = B.CreateFMul(StepValue, Index);
    if (isa<Instruction>(MulExp))
      // We have to check because MulExp may be a constant.
      cast<Instruction>(MulExp)->setFastMathFlags(Flags);

    Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                               "induction");
    if (isa<Instruction>(BOp))
      cast<Instruction>(BOp)->setFastMathFlags(Flags);

    return BOp;
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  LoopExitBlock = OrigLoop->getUniqueExitBlock();
  assert(LoopExitBlock && "Must have an exit block");
  assert(LoopVectorPreHeader && "Invalid loop structure");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  // Set up branch from middle block to the exit and scalar preheader blocks.
  // completeLoopSkeleton will update the condition to use an iteration check,
  // if required to decide whether to execute the remainder.
  BranchInst *BrInst =
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}

void InnerLoopVectorizer::createInductionResumeValues(
    Loop *L, Value *VectorTripCount,
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(VectorTripCount && L && "Expected valid arguments");
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
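  // E.g. for a counting loop with VF * UF = 8, the scalar preheader gets a phi
  // like (value names illustrative):
  //   %bc.resume.val = phi i64 [ %n.vec, %middle.block ],
  //                            [ 0, %vector.memcheck ]
  // so the remainder loop resumes exactly where the vector loop stopped.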
3383 for (auto &InductionEntry : Legal->getInductionVars()) { 3384 PHINode *OrigPhi = InductionEntry.first; 3385 InductionDescriptor II = InductionEntry.second; 3386 3387 // Create phi nodes to merge from the backedge-taken check block. 3388 PHINode *BCResumeVal = 3389 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3390 LoopScalarPreHeader->getTerminator()); 3391 // Copy original phi DL over to the new one. 3392 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3393 Value *&EndValue = IVEndValues[OrigPhi]; 3394 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3395 if (OrigPhi == OldInduction) { 3396 // We know what the end value is. 3397 EndValue = VectorTripCount; 3398 } else { 3399 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3400 Type *StepType = II.getStep()->getType(); 3401 Instruction::CastOps CastOp = 3402 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3403 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3404 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3405 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3406 EndValue->setName("ind.end"); 3407 3408 // Compute the end value for the additional bypass (if applicable). 3409 if (AdditionalBypass.first) { 3410 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3411 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3412 StepType, true); 3413 CRD = 3414 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3415 EndValueFromAdditionalBypass = 3416 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3417 EndValueFromAdditionalBypass->setName("ind.end"); 3418 } 3419 } 3420 // The new PHI merges the original incoming value, in case of a bypass, 3421 // or the value at the end of the vectorized loop. 3422 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3423 3424 // Fix the scalar body counter (PHI node). 3425 // The old induction's phi node in the scalar body needs the truncated 3426 // value. 3427 for (BasicBlock *BB : LoopBypassBlocks) 3428 BCResumeVal->addIncoming(II.getStartValue(), BB); 3429 3430 if (AdditionalBypass.first) 3431 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3432 EndValueFromAdditionalBypass); 3433 3434 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3435 } 3436 } 3437 3438 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3439 MDNode *OrigLoopID) { 3440 assert(L && "Expected valid loop."); 3441 3442 // The trip counts should be cached by now. 3443 Value *Count = getOrCreateTripCount(L); 3444 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3445 3446 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3447 3448 // Add a check in the middle block to see if we have completed 3449 // all of the iterations in the first vector loop. 3450 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3451 // If tail is to be folded, we know we don't need to run the remainder. 3452 if (!Cost->foldTailByMasking()) { 3453 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3454 Count, VectorTripCount, "cmp.n", 3455 LoopMiddleBlock->getTerminator()); 3456 3457 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3458 // of the corresponding compare because they may have ended up with 3459 // different line numbers and we want to avoid awkward line stepping while 3460 // debugging. Eg. if the compare has got a line number inside the loop. 
3461 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3462 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3463 } 3464 3465 // Get ready to start creating new instructions into the vectorized body. 3466 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3467 "Inconsistent vector loop preheader"); 3468 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3469 3470 Optional<MDNode *> VectorizedLoopID = 3471 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3472 LLVMLoopVectorizeFollowupVectorized}); 3473 if (VectorizedLoopID.hasValue()) { 3474 L->setLoopID(VectorizedLoopID.getValue()); 3475 3476 // Do not setAlreadyVectorized if loop attributes have been defined 3477 // explicitly. 3478 return LoopVectorPreHeader; 3479 } 3480 3481 // Keep all loop hints from the original loop on the vector loop (we'll 3482 // replace the vectorizer-specific hints below). 3483 if (MDNode *LID = OrigLoop->getLoopID()) 3484 L->setLoopID(LID); 3485 3486 LoopVectorizeHints Hints(L, true, *ORE); 3487 Hints.setAlreadyVectorized(); 3488 3489 #ifdef EXPENSIVE_CHECKS 3490 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3491 LI->verify(*DT); 3492 #endif 3493 3494 return LoopVectorPreHeader; 3495 } 3496 3497 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3498 /* 3499 In this function we generate a new loop. The new loop will contain 3500 the vectorized instructions while the old loop will continue to run the 3501 scalar remainder. 3502 3503 [ ] <-- loop iteration number check. 3504 / | 3505 / v 3506 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3507 | / | 3508 | / v 3509 || [ ] <-- vector pre header. 3510 |/ | 3511 | v 3512 | [ ] \ 3513 | [ ]_| <-- vector loop. 3514 | | 3515 | v 3516 | -[ ] <--- middle-block. 3517 | / | 3518 | / v 3519 -|- >[ ] <--- new preheader. 3520 | | 3521 | v 3522 | [ ] \ 3523 | [ ]_| <-- old scalar loop to handle remainder. 3524 \ | 3525 \ v 3526 >[ ] <-- exit block. 3527 ... 3528 */ 3529 3530 // Get the metadata of the original loop before it gets modified. 3531 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3532 3533 // Create an empty vector loop, and prepare basic blocks for the runtime 3534 // checks. 3535 Loop *Lp = createVectorLoopSkeleton(""); 3536 3537 // Now, compare the new count to zero. If it is zero skip the vector loop and 3538 // jump to the scalar loop. This check also covers the case where the 3539 // backedge-taken count is uint##_max: adding one to it will overflow leading 3540 // to an incorrect trip count of zero. In this (rare) case we will also jump 3541 // to the scalar loop. 3542 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3543 3544 // Generate the code to check any assumptions that we've made for SCEV 3545 // expressions. 3546 emitSCEVChecks(Lp, LoopScalarPreHeader); 3547 3548 // Generate the code that checks in runtime if arrays overlap. We put the 3549 // checks into a separate block to make the more common case of few elements 3550 // faster. 3551 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3552 3553 // Some loops have a single integer induction variable, while other loops 3554 // don't. One example is c++ iterators that often have multiple pointer 3555 // induction variables. In the code below we also support a case where we 3556 // don't have a single induction variable. 3557 // 3558 // We try to obtain an induction variable from the original loop as hard 3559 // as possible. 
However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
  Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues(Lp, CountRoundDown);

  return completeLoopSkeleton(Lp, OrigLoopID);
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop latch).
  // We allow both, but they, obviously, have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
                                                       ElementCount VF,
                                                       bool &NeedToScalarize) {
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from there,
  // execute VF scalar calls, and then gather the result into the vector return
  // value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys,
                                                 TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute corresponding vector type for return value and arguments.
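  // E.g. for VF = 4, a scalar call "float foo(float)" is costed against a
  // vector variant of type <4 x float> (<4 x float>).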
3721 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3722 for (Type *ScalarTy : ScalarTys) 3723 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3724 3725 // Compute costs of unpacking argument values for the scalar calls and 3726 // packing the return values to a vector. 3727 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF); 3728 3729 unsigned Cost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3730 3731 // If we can't emit a vector call for this function, then the currently found 3732 // cost is the cost we need to return. 3733 NeedToScalarize = true; 3734 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3735 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3736 3737 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3738 return Cost; 3739 3740 // If the corresponding vector cost is cheaper, return its cost. 3741 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, 3742 TTI::TCK_RecipThroughput); 3743 if (VectorCallCost < Cost) { 3744 NeedToScalarize = false; 3745 return VectorCallCost; 3746 } 3747 return Cost; 3748 } 3749 3750 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3751 ElementCount VF) { 3752 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3753 assert(ID && "Expected intrinsic call!"); 3754 3755 IntrinsicCostAttributes CostAttrs(ID, *CI, VF); 3756 return TTI.getIntrinsicInstrCost(CostAttrs, 3757 TargetTransformInfo::TCK_RecipThroughput); 3758 } 3759 3760 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3761 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3762 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3763 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3764 } 3765 3766 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3767 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3768 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3769 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3770 } 3771 3772 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3773 // For every instruction `I` in MinBWs, truncate the operands, create a 3774 // truncated version of `I` and reextend its result. InstCombine runs 3775 // later and will remove any ext/trunc pairs. 3776 SmallPtrSet<Value *, 4> Erased; 3777 for (const auto &KV : Cost->getMinimalBitwidths()) { 3778 // If the value wasn't vectorized, we must maintain the original scalar 3779 // type. The absence of the value from VectorLoopValueMap indicates that it 3780 // wasn't vectorized. 3781 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3782 continue; 3783 for (unsigned Part = 0; Part < UF; ++Part) { 3784 Value *I = getOrCreateVectorValue(KV.first, Part); 3785 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3786 continue; 3787 Type *OriginalTy = I->getType(); 3788 Type *ScalarTruncatedTy = 3789 IntegerType::get(OriginalTy->getContext(), KV.second); 3790 auto *TruncatedTy = FixedVectorType::get( 3791 ScalarTruncatedTy, 3792 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3793 if (TruncatedTy == OriginalTy) 3794 continue; 3795 3796 IRBuilder<> B(cast<Instruction>(I)); 3797 auto ShrinkOperand = [&](Value *V) -> Value * { 3798 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3799 if (ZI->getSrcTy() == TruncatedTy) 3800 return ZI->getOperand(0); 3801 return B.CreateZExtOrTrunc(V, TruncatedTy); 3802 }; 3803 3804 // The actual instruction modification depends on the instruction type, 3805 // unfortunately. 
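      // E.g. an i32 add whose minimal bitwidth is 8 becomes, per part:
      //   %a8  = trunc <4 x i32> %a to <4 x i8>
      //   %b8  = trunc <4 x i32> %b to <4 x i8>
      //   %r8  = add <4 x i8> %a8, %b8
      //   %res = zext <4 x i8> %r8 to <4 x i32>
      // leaving InstCombine to clean up redundant casts.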
3806 Value *NewI = nullptr; 3807 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3808 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3809 ShrinkOperand(BO->getOperand(1))); 3810 3811 // Any wrapping introduced by shrinking this operation shouldn't be 3812 // considered undefined behavior. So, we can't unconditionally copy 3813 // arithmetic wrapping flags to NewI. 3814 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3815 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3816 NewI = 3817 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3818 ShrinkOperand(CI->getOperand(1))); 3819 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3820 NewI = B.CreateSelect(SI->getCondition(), 3821 ShrinkOperand(SI->getTrueValue()), 3822 ShrinkOperand(SI->getFalseValue())); 3823 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3824 switch (CI->getOpcode()) { 3825 default: 3826 llvm_unreachable("Unhandled cast!"); 3827 case Instruction::Trunc: 3828 NewI = ShrinkOperand(CI->getOperand(0)); 3829 break; 3830 case Instruction::SExt: 3831 NewI = B.CreateSExtOrTrunc( 3832 CI->getOperand(0), 3833 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3834 break; 3835 case Instruction::ZExt: 3836 NewI = B.CreateZExtOrTrunc( 3837 CI->getOperand(0), 3838 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3839 break; 3840 } 3841 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3842 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3843 ->getNumElements(); 3844 auto *O0 = B.CreateZExtOrTrunc( 3845 SI->getOperand(0), 3846 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3847 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3848 ->getNumElements(); 3849 auto *O1 = B.CreateZExtOrTrunc( 3850 SI->getOperand(1), 3851 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3852 3853 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3854 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3855 // Don't do anything with the operands, just extend the result. 3856 continue; 3857 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3858 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3859 ->getNumElements(); 3860 auto *O0 = B.CreateZExtOrTrunc( 3861 IE->getOperand(0), 3862 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3863 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3864 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3865 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3866 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3867 ->getNumElements(); 3868 auto *O0 = B.CreateZExtOrTrunc( 3869 EE->getOperand(0), 3870 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3871 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3872 } else { 3873 // If we don't know what to do, be conservative and don't do anything. 3874 continue; 3875 } 3876 3877 // Lastly, extend the result. 3878 NewI->takeName(cast<Instruction>(I)); 3879 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3880 I->replaceAllUsesWith(Res); 3881 cast<Instruction>(I)->eraseFromParent(); 3882 Erased.insert(I); 3883 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3884 } 3885 } 3886 3887 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3888 for (const auto &KV : Cost->getMinimalBitwidths()) { 3889 // If the value wasn't vectorized, we must maintain the original scalar 3890 // type. 
The absence of the value from VectorLoopValueMap indicates that it 3891 // wasn't vectorized. 3892 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3893 continue; 3894 for (unsigned Part = 0; Part < UF; ++Part) { 3895 Value *I = getOrCreateVectorValue(KV.first, Part); 3896 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3897 if (Inst && Inst->use_empty()) { 3898 Value *NewI = Inst->getOperand(0); 3899 Inst->eraseFromParent(); 3900 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3901 } 3902 } 3903 } 3904 } 3905 3906 void InnerLoopVectorizer::fixVectorizedLoop() { 3907 // Insert truncates and extends for any truncated instructions as hints to 3908 // InstCombine. 3909 if (VF.isVector()) 3910 truncateToMinimalBitwidths(); 3911 3912 // Fix widened non-induction PHIs by setting up the PHI operands. 3913 if (OrigPHIsToFix.size()) { 3914 assert(EnableVPlanNativePath && 3915 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3916 fixNonInductionPHIs(); 3917 } 3918 3919 // At this point every instruction in the original loop is widened to a 3920 // vector form. Now we need to fix the recurrences in the loop. These PHI 3921 // nodes are currently empty because we did not want to introduce cycles. 3922 // This is the second stage of vectorizing recurrences. 3923 fixCrossIterationPHIs(); 3924 3925 // Forget the original basic block. 3926 PSE.getSE()->forgetLoop(OrigLoop); 3927 3928 // Fix-up external users of the induction variables. 3929 for (auto &Entry : Legal->getInductionVars()) 3930 fixupIVUsers(Entry.first, Entry.second, 3931 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3932 IVEndValues[Entry.first], LoopMiddleBlock); 3933 3934 fixLCSSAPHIs(); 3935 for (Instruction *PI : PredicatedInstructions) 3936 sinkScalarOperands(&*PI); 3937 3938 // Remove redundant induction instructions. 3939 cse(LoopVectorBody); 3940 3941 // Set/update profile weights for the vector and remainder loops as original 3942 // loop iterations are now distributed among them. Note that original loop 3943 // represented by LoopScalarBody becomes remainder loop after vectorization. 3944 // 3945 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 3946 // end up getting slightly roughened result but that should be OK since 3947 // profile is not inherently precise anyway. Note also possible bypass of 3948 // vector code caused by legality checks is ignored, assigning all the weight 3949 // to the vector loop, optimistically. 3950 // 3951 // For scalable vectorization we can't know at compile time how many iterations 3952 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3953 // vscale of '1'. 3954 setProfileInfoAfterUnrolling( 3955 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 3956 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 3957 } 3958 3959 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3960 // In order to support recurrences we need to be able to vectorize Phi nodes. 3961 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3962 // stage #2: We now need to fix the recurrences by adding incoming edges to 3963 // the currently empty PHI nodes. At this point every instruction in the 3964 // original loop is widened to a vector form so we can use them to construct 3965 // the incoming edges. 3966 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3967 // Handle first-order recurrences and reductions that need to be fixed. 
3968     if (Legal->isFirstOrderRecurrence(&Phi))
3969       fixFirstOrderRecurrence(&Phi);
3970     else if (Legal->isReductionVariable(&Phi))
3971       fixReduction(&Phi);
3972   }
3973 }
3974
3975 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3976   // This is the second phase of vectorizing first-order recurrences. An
3977   // overview of the transformation is described below. Suppose we have the
3978   // following loop.
3979   //
3980   //   for (int i = 0; i < n; ++i)
3981   //     b[i] = a[i] - a[i - 1];
3982   //
3983   // There is a first-order recurrence on "a". For this loop, the shorthand
3984   // scalar IR looks like:
3985   //
3986   //   scalar.ph:
3987   //     s_init = a[-1]
3988   //     br scalar.body
3989   //
3990   //   scalar.body:
3991   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3992   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3993   //     s2 = a[i]
3994   //     b[i] = s2 - s1
3995   //     br cond, scalar.body, ...
3996   //
3997   // In this example, s1 is a recurrence because its value depends on the
3998   // previous iteration. In the first phase of vectorization, we created a
3999   // temporary value for s1. We now complete the vectorization and produce the
4000   // shorthand vector IR shown below (for VF = 4, UF = 1).
4001   //
4002   //   vector.ph:
4003   //     v_init = vector(..., ..., ..., a[-1])
4004   //     br vector.body
4005   //
4006   //   vector.body:
4007   //     i = phi [0, vector.ph], [i+4, vector.body]
4008   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4009   //     v2 = a[i, i+1, i+2, i+3];
4010   //     v3 = vector(v1(3), v2(0, 1, 2))
4011   //     b[i, i+1, i+2, i+3] = v2 - v3
4012   //     br cond, vector.body, middle.block
4013   //
4014   //   middle.block:
4015   //     x = v2(3)
4016   //     br scalar.ph
4017   //
4018   //   scalar.ph:
4019   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4020   //     br scalar.body
4021   //
4022   // After the vector loop finishes executing, we extract the next value of
4023   // the recurrence (x) to use as the initial value in the scalar loop.
4024
4025   // Get the original loop preheader and single loop latch.
4026   auto *Preheader = OrigLoop->getLoopPreheader();
4027   auto *Latch = OrigLoop->getLoopLatch();
4028
4029   // Get the initial and previous values of the scalar recurrence.
4030   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4031   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4032
4033   // Create a vector from the initial value.
4034   auto *VectorInit = ScalarInit;
4035   if (VF.isVector()) {
4036     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4037     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4038     VectorInit = Builder.CreateInsertElement(
4039         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4040         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
4041   }
4042
4043   // We constructed a temporary phi node in the first phase of vectorization.
4044   // This phi node will eventually be deleted.
4045   Builder.SetInsertPoint(
4046       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4047
4048   // Create a phi node for the new recurrence. The current value will either be
4049   // the initial value inserted into a vector or loop-varying vector value.
4050   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4051   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4052
4053   // Get the vectorized previous value of the last part UF - 1. It appears last
4054   // among all unrolled iterations, due to the order of their construction.
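  // For instance (illustrative), with UF = 2 the widened values for part 0
  // are emitted before those for part 1, so part UF - 1 = 1 holds the values
  // of the latest unrolled iterations.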
4055 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 4056 4057 // Find and set the insertion point after the previous value if it is an 4058 // instruction. 4059 BasicBlock::iterator InsertPt; 4060 // Note that the previous value may have been constant-folded so it is not 4061 // guaranteed to be an instruction in the vector loop. 4062 // FIXME: Loop invariant values do not form recurrences. We should deal with 4063 // them earlier. 4064 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 4065 InsertPt = LoopVectorBody->getFirstInsertionPt(); 4066 else { 4067 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 4068 if (isa<PHINode>(PreviousLastPart)) 4069 // If the previous value is a phi node, we should insert after all the phi 4070 // nodes in the block containing the PHI to avoid breaking basic block 4071 // verification. Note that the basic block may be different to 4072 // LoopVectorBody, in case we predicate the loop. 4073 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 4074 else 4075 InsertPt = ++PreviousInst->getIterator(); 4076 } 4077 Builder.SetInsertPoint(&*InsertPt); 4078 4079 // We will construct a vector for the recurrence by combining the values for 4080 // the current and previous iterations. This is the required shuffle mask. 4081 assert(!VF.isScalable()); 4082 SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue()); 4083 ShuffleMask[0] = VF.getKnownMinValue() - 1; 4084 for (unsigned I = 1; I < VF.getKnownMinValue(); ++I) 4085 ShuffleMask[I] = I + VF.getKnownMinValue() - 1; 4086 4087 // The vector from which to take the initial value for the current iteration 4088 // (actual or unrolled). Initially, this is the vector phi node. 4089 Value *Incoming = VecPhi; 4090 4091 // Shuffle the current and previous vector and update the vector parts. 4092 for (unsigned Part = 0; Part < UF; ++Part) { 4093 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 4094 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 4095 auto *Shuffle = 4096 VF.isVector() 4097 ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask) 4098 : Incoming; 4099 PhiPart->replaceAllUsesWith(Shuffle); 4100 cast<Instruction>(PhiPart)->eraseFromParent(); 4101 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 4102 Incoming = PreviousPart; 4103 } 4104 4105 // Fix the latch value of the new recurrence in the vector loop. 4106 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4107 4108 // Extract the last vector element in the middle block. This will be the 4109 // initial value for the recurrence when jumping to the scalar loop. 4110 auto *ExtractForScalar = Incoming; 4111 if (VF.isVector()) { 4112 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4113 ExtractForScalar = Builder.CreateExtractElement( 4114 ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1), 4115 "vector.recur.extract"); 4116 } 4117 // Extract the second last element in the middle block if the 4118 // Phi is used outside the loop. We need to extract the phi itself 4119 // and not the last element (the phi update in the current iteration). This 4120 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4121 // when the scalar loop is not run at all. 
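// In terms of the VF = 4, UF = 1 sketch above (an illustrative reading):
// ExtractForScalar is v2(3), the value the recurrence phi would assume in
// the next iteration, whereas the phi itself held v2(2) during the final
// vector iteration; it is that second-to-last element which out-of-loop
// users of the phi need.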
4122   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4123   if (VF.isVector())
4124     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4125         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4126         "vector.recur.extract.for.phi");
4127   // When the loop is unrolled without vectorizing, initialize
4128   // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
4129   // value of `Incoming`. This is analogous to the vectorized case above:
4130   // extracting the second-to-last element when VF > 1.
4131   else if (UF > 1)
4132     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4133
4134   // Fix the initial value of the original recurrence in the scalar loop.
4135   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4136   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4137   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4138     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4139     Start->addIncoming(Incoming, BB);
4140   }
4141
4142   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4143   Phi->setName("scalar.recur");
4144
4145   // Finally, fix users of the recurrence outside the loop. The users will need
4146   // either the last value of the scalar recurrence or the last value of the
4147   // vector recurrence we extracted in the middle block. Since the loop is in
4148   // LCSSA form, we just need to find all the phi nodes for the original scalar
4149   // recurrence in the exit block, and then add an edge for the middle block.
4150   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4151     if (LCSSAPhi.getIncomingValue(0) == Phi) {
4152       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4153     }
4154   }
4155 }
4156
4157 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4158   // Get its reduction variable descriptor.
4159   assert(Legal->isReductionVariable(Phi) &&
4160          "Unable to find the reduction variable");
4161   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4162
4163   RecurKind RK = RdxDesc.getRecurrenceKind();
4164   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4165   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4166   setDebugLocFromInst(Builder, ReductionStartValue);
4167   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4168
4169   // This is the vector-clone of the value that leaves the loop.
4170   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
4171
4172   // Wrap flags are in general invalid after vectorization, clear them.
4173   clearReductionWrapFlags(RdxDesc);
4174
4175   // Fix the vector-loop phi.
4176
4177   // Reductions do not have to start at zero. They can start with
4178   // any loop-invariant values.
4179   BasicBlock *Latch = OrigLoop->getLoopLatch();
4180   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4181
4182   for (unsigned Part = 0; Part < UF; ++Part) {
4183     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
4184     Value *Val = getOrCreateVectorValue(LoopVal, Part);
4185     cast<PHINode>(VecRdxPhi)
4186         ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4187   }
4188
4189   // Before each round, move the insertion point right between
4190   // the PHIs and the values we are going to write.
4191   // This allows us to write both PHINodes and the extractelement
4192   // instructions.
4193 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4194 4195 setDebugLocFromInst(Builder, LoopExitInst); 4196 4197 // If tail is folded by masking, the vector value to leave the loop should be 4198 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4199 // instead of the former. For an inloop reduction the reduction will already 4200 // be predicated, and does not need to be handled here. 4201 if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { 4202 for (unsigned Part = 0; Part < UF; ++Part) { 4203 Value *VecLoopExitInst = 4204 VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4205 Value *Sel = nullptr; 4206 for (User *U : VecLoopExitInst->users()) { 4207 if (isa<SelectInst>(U)) { 4208 assert(!Sel && "Reduction exit feeding two selects"); 4209 Sel = U; 4210 } else 4211 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4212 } 4213 assert(Sel && "Reduction exit feeds no select"); 4214 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel); 4215 4216 // If the target can create a predicated operator for the reduction at no 4217 // extra cost in the loop (for example a predicated vadd), it can be 4218 // cheaper for the select to remain in the loop than be sunk out of it, 4219 // and so use the select value for the phi instead of the old 4220 // LoopExitValue. 4221 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4222 if (PreferPredicatedReductionSelect || 4223 TTI->preferPredicatedReductionSelect( 4224 RdxDesc.getOpcode(), Phi->getType(), 4225 TargetTransformInfo::ReductionFlags())) { 4226 auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part)); 4227 VecRdxPhi->setIncomingValueForBlock( 4228 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4229 } 4230 } 4231 } 4232 4233 // If the vector reduction can be performed in a smaller type, we truncate 4234 // then extend the loop exit value to enable InstCombine to evaluate the 4235 // entire expression in the smaller type. 4236 if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) { 4237 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4238 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4239 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4240 Builder.SetInsertPoint( 4241 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4242 VectorParts RdxParts(UF); 4243 for (unsigned Part = 0; Part < UF; ++Part) { 4244 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4245 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4246 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4247 : Builder.CreateZExt(Trunc, VecTy); 4248 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4249 UI != RdxParts[Part]->user_end();) 4250 if (*UI != Trunc) { 4251 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4252 RdxParts[Part] = Extnd; 4253 } else { 4254 ++UI; 4255 } 4256 } 4257 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4258 for (unsigned Part = 0; Part < UF; ++Part) { 4259 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4260 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 4261 } 4262 } 4263 4264 // Reduce all of the unrolled parts into a single vector. 
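// For an integer add reduction with UF = 2, for example, this emits
// shorthand IR like "%bin.rdx = add <4 x i32> %rdx.part1, %rdx.part0";
// min/max recurrences instead combine the parts with a compare-and-select
// via createMinMaxOp. (Illustrative, assuming VF = 4.)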
4265 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 4266 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4267 4268 // The middle block terminator has already been assigned a DebugLoc here (the 4269 // OrigLoop's single latch terminator). We want the whole middle block to 4270 // appear to execute on this line because: (a) it is all compiler generated, 4271 // (b) these instructions are always executed after evaluating the latch 4272 // conditional branch, and (c) other passes may add new predecessors which 4273 // terminate on this line. This is the easiest way to ensure we don't 4274 // accidentally cause an extra step back into the loop while debugging. 4275 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4276 for (unsigned Part = 1; Part < UF; ++Part) { 4277 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4278 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4279 // Floating point operations had to be 'fast' to enable the reduction. 4280 ReducedPartRdx = addFastMathFlag( 4281 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 4282 ReducedPartRdx, "bin.rdx"), 4283 RdxDesc.getFastMathFlags()); 4284 else 4285 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4286 } 4287 4288 // Create the reduction after the loop. Note that inloop reductions create the 4289 // target reduction in the loop using a Reduction recipe. 4290 if (VF.isVector() && !IsInLoopReductionPhi) { 4291 ReducedPartRdx = 4292 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4293 // If the reduction can be performed in a smaller type, we need to extend 4294 // the reduction to the wider type before we branch to the original loop. 4295 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4296 ReducedPartRdx = 4297 RdxDesc.isSigned() 4298 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4299 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4300 } 4301 4302 // Create a phi node that merges control-flow from the backedge-taken check 4303 // block and the middle block. 4304 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4305 LoopScalarPreHeader->getTerminator()); 4306 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4307 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4308 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4309 4310 // Now, we need to fix the users of the reduction variable 4311 // inside and outside of the scalar remainder loop. 4312 // We know that the loop is in LCSSA form. We need to update the 4313 // PHI nodes in the exit blocks. 4314 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4315 // All PHINodes need to have a single entry edge, or two if 4316 // we already fixed them. 4317 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 4318 4319 // We found a reduction value exit-PHI. Update it with the 4320 // incoming bypass edge. 4321 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 4322 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4323 } // end of the LCSSA phi scan. 4324 4325 // Fix the scalar loop reduction variable with the incoming reduction sum 4326 // from the vector body and from the backedge value. 4327 int IncomingEdgeBlockIdx = 4328 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4329 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4330 // Pick the other block. 4331 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 
0 : 1);
4332   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4333   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4334 }
4335
4336 void InnerLoopVectorizer::clearReductionWrapFlags(
4337     RecurrenceDescriptor &RdxDesc) {
4338   RecurKind RK = RdxDesc.getRecurrenceKind();
4339   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4340     return;
4341
4342   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4343   assert(LoopExitInstr && "null loop exit instruction");
4344   SmallVector<Instruction *, 8> Worklist;
4345   SmallPtrSet<Instruction *, 8> Visited;
4346   Worklist.push_back(LoopExitInstr);
4347   Visited.insert(LoopExitInstr);
4348
4349   while (!Worklist.empty()) {
4350     Instruction *Cur = Worklist.pop_back_val();
4351     if (isa<OverflowingBinaryOperator>(Cur))
4352       for (unsigned Part = 0; Part < UF; ++Part) {
4353         Value *V = getOrCreateVectorValue(Cur, Part);
4354         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4355       }
4356
4357     for (User *U : Cur->users()) {
4358       Instruction *UI = cast<Instruction>(U);
4359       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4360           Visited.insert(UI).second)
4361         Worklist.push_back(UI);
4362     }
4363   }
4364 }
4365
4366 void InnerLoopVectorizer::fixLCSSAPHIs() {
4367   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4368     if (LCSSAPhi.getNumIncomingValues() == 1) {
4369       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4370       // Non-instruction incoming values have only a single value; use lane zero.
4371       unsigned LastLane = 0;
4372       if (isa<Instruction>(IncomingValue))
4373         LastLane = Cost->isUniformAfterVectorization(
4374                        cast<Instruction>(IncomingValue), VF)
4375                        ? 0
4376                        : VF.getKnownMinValue() - 1;
4377       assert((!VF.isScalable() || LastLane == 0) &&
4378              "scalable vectors don't support non-uniform scalars yet");
4379       // Can be a loop invariant incoming value or the last scalar value to be
4380       // extracted from the vectorized loop.
4381       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4382       Value *lastIncomingValue =
4383           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
4384       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4385     }
4386   }
4387 }
4388
4389 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4390   // The basic block and loop containing the predicated instruction.
4391   auto *PredBB = PredInst->getParent();
4392   auto *VectorLoop = LI->getLoopFor(PredBB);
4393
4394   // Initialize a worklist with the operands of the predicated instruction.
4395   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4396
4397   // Holds instructions that we need to analyze again. An instruction may be
4398   // reanalyzed if we don't yet know if we can sink it or not.
4399   SmallVector<Instruction *, 8> InstsToReanalyze;
4400
4401   // Returns true if a given use occurs in the predicated block. Phi nodes use
4402   // their operands in their corresponding predecessor blocks.
4403   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4404     auto *I = cast<Instruction>(U.getUser());
4405     BasicBlock *BB = I->getParent();
4406     if (auto *Phi = dyn_cast<PHINode>(I))
4407       BB = Phi->getIncomingBlock(
4408           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4409     return BB == PredBB;
4410   };
4411
4412   // Iteratively sink the scalarized operands of the predicated instruction
4413   // into the block we created for it. When an instruction is sunk, its
4414   // operands are then added to the worklist. The algorithm ends when a pass
4415   // through the worklist fails to sink a single instruction.
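  // Sketch of the intent (illustrative): if a predicated store was split into
  // its own block and its address "%gep = getelementptr ..." is used only by
  // that store, the getelementptr (and, transitively, its scalarized
  // operands) can be sunk into the predicated block, so the address is only
  // computed when the mask is active.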
4416   bool Changed;
4417   do {
4418     // Add the instructions that need to be reanalyzed to the worklist, and
4419     // reset the changed indicator.
4420     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4421     InstsToReanalyze.clear();
4422     Changed = false;
4423
4424     while (!Worklist.empty()) {
4425       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4426
4427       // We can't sink an instruction if it is a phi node, is already in the
4428       // predicated block, is not in the loop, or may have side effects.
4429       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4430           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4431         continue;
4432
4433       // It's legal to sink the instruction if all its uses occur in the
4434       // predicated block. Otherwise, there's nothing to do yet, and we may
4435       // need to reanalyze the instruction.
4436       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4437         InstsToReanalyze.push_back(I);
4438         continue;
4439       }
4440
4441       // Move the instruction to the beginning of the predicated block, and add
4442       // its operands to the worklist.
4443       I->moveBefore(&*PredBB->getFirstInsertionPt());
4444       Worklist.insert(I->op_begin(), I->op_end());
4445
4446       // The sinking may have enabled other instructions to be sunk, so we will
4447       // need to iterate.
4448       Changed = true;
4449     }
4450   } while (Changed);
4451 }
4452
4453 void InnerLoopVectorizer::fixNonInductionPHIs() {
4454   for (PHINode *OrigPhi : OrigPHIsToFix) {
4455     PHINode *NewPhi =
4456         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4457     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4458
4459     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4460         predecessors(OrigPhi->getParent()));
4461     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4462         predecessors(NewPhi->getParent()));
4463     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4464            "Scalar and Vector BB should have the same number of predecessors");
4465
4466     // The insertion point in Builder may be invalidated by the time we get
4467     // here. Force the Builder insertion point to something valid so that we do
4468     // not run into issues during insertion point restore in
4469     // getOrCreateVectorValue calls below.
4470     Builder.SetInsertPoint(NewPhi);
4471
4472     // The predecessor order is preserved and we can rely on the mapping between
4473     // scalar and vector block predecessors.
4474     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4475       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4476
4477       // When looking up the new scalar/vector values to fix up, use incoming
4478       // values from the original phi.
4479       Value *ScIncV =
4480           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4481
4482       // The scalar incoming value may need a broadcast.
4483       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4484       NewPhi->addIncoming(NewIncV, NewPredBB);
4485     }
4486   }
4487 }
4488
4489 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4490                                    VPUser &Operands, unsigned UF,
4491                                    ElementCount VF, bool IsPtrLoopInvariant,
4492                                    SmallBitVector &IsIndexLoopInvariant,
4493                                    VPTransformState &State) {
4494   // Construct a vector GEP by widening the operands of the scalar GEP as
4495   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4496   // results in a vector of pointers when at least one operand of the GEP
4497   // is vector-typed. Thus, to keep the representation compact, we only use
4498   // vector-typed operands for loop-varying values.
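  // For example (illustrative): widening "getelementptr inbounds %a, i64 %iv"
  // with a loop-invariant %a and loop-varying %iv keeps %a scalar and uses a
  // <VF x i64> index vector, yielding one GEP per unroll part that produces a
  // vector of pointers.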
4499 4500 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4501 // If we are vectorizing, but the GEP has only loop-invariant operands, 4502 // the GEP we build (by only using vector-typed operands for 4503 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4504 // produce a vector of pointers, we need to either arbitrarily pick an 4505 // operand to broadcast, or broadcast a clone of the original GEP. 4506 // Here, we broadcast a clone of the original. 4507 // 4508 // TODO: If at some point we decide to scalarize instructions having 4509 // loop-invariant operands, this special case will no longer be 4510 // required. We would add the scalarization decision to 4511 // collectLoopScalars() and teach getVectorValue() to broadcast 4512 // the lane-zero scalar value. 4513 auto *Clone = Builder.Insert(GEP->clone()); 4514 for (unsigned Part = 0; Part < UF; ++Part) { 4515 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4516 State.set(VPDef, GEP, EntryPart, Part); 4517 addMetadata(EntryPart, GEP); 4518 } 4519 } else { 4520 // If the GEP has at least one loop-varying operand, we are sure to 4521 // produce a vector of pointers. But if we are only unrolling, we want 4522 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4523 // produce with the code below will be scalar (if VF == 1) or vector 4524 // (otherwise). Note that for the unroll-only case, we still maintain 4525 // values in the vector mapping with initVector, as we do for other 4526 // instructions. 4527 for (unsigned Part = 0; Part < UF; ++Part) { 4528 // The pointer operand of the new GEP. If it's loop-invariant, we 4529 // won't broadcast it. 4530 auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0}) 4531 : State.get(Operands.getOperand(0), Part); 4532 4533 // Collect all the indices for the new GEP. If any index is 4534 // loop-invariant, we won't broadcast it. 4535 SmallVector<Value *, 4> Indices; 4536 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4537 VPValue *Operand = Operands.getOperand(I); 4538 if (IsIndexLoopInvariant[I - 1]) 4539 Indices.push_back(State.get(Operand, {0, 0})); 4540 else 4541 Indices.push_back(State.get(Operand, Part)); 4542 } 4543 4544 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4545 // but it should be a vector, otherwise. 4546 auto *NewGEP = 4547 GEP->isInBounds() 4548 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4549 Indices) 4550 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4551 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4552 "NewGEP is not a pointer vector"); 4553 State.set(VPDef, GEP, NewGEP, Part); 4554 addMetadata(NewGEP, GEP); 4555 } 4556 } 4557 } 4558 4559 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4560 RecurrenceDescriptor *RdxDesc, 4561 Value *StartV, unsigned UF, 4562 ElementCount VF) { 4563 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4564 PHINode *P = cast<PHINode>(PN); 4565 if (EnableVPlanNativePath) { 4566 // Currently we enter here in the VPlan-native path for non-induction 4567 // PHIs where all control flow is uniform. We simply widen these PHIs. 4568 // Create a vector phi with no operands - the vector phi operands will be 4569 // set at the end of vector code generation. 4570 Type *VecTy = 4571 (VF.isScalar()) ? 
PN->getType() : VectorType::get(PN->getType(), VF);
4572     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4573     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4574     OrigPHIsToFix.push_back(P);
4575
4576     return;
4577   }
4578
4579   assert(PN->getParent() == OrigLoop->getHeader() &&
4580          "Non-header phis should have been handled elsewhere");
4581
4582   // In order to support recurrences we need to be able to vectorize Phi nodes.
4583   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4584   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4585   // this value when we vectorize all of the instructions that use the PHI.
4586   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4587     Value *Iden = nullptr;
4588     bool ScalarPHI =
4589         (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4590     Type *VecTy =
4591         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF);
4592
4593     if (RdxDesc) {
4594       assert(Legal->isReductionVariable(P) && StartV &&
4595              "RdxDesc should only be set for reduction variables; in that case "
4596              "a StartV is also required");
4597       RecurKind RK = RdxDesc->getRecurrenceKind();
4598       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4599         // MinMax reductions have the start value as their identity.
4600         if (ScalarPHI) {
4601           Iden = StartV;
4602         } else {
4603           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4604           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4605           StartV = Iden = Builder.CreateVectorSplat(VF, StartV, "minmax.ident");
4606         }
4607       } else {
4608         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4609             RK, VecTy->getScalarType());
4610         Iden = IdenC;
4611
4612         if (!ScalarPHI) {
4613           Iden = ConstantVector::getSplat(VF, IdenC);
4614           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4615           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4616           Constant *Zero = Builder.getInt32(0);
4617           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4618         }
4619       }
4620     }
4621
4622     for (unsigned Part = 0; Part < UF; ++Part) {
4623       // This is phase one of vectorizing PHIs.
4624       Value *EntryPart = PHINode::Create(
4625           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4626       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4627       if (StartV) {
4628         // Make sure to add the reduction start value only to the
4629         // first unroll part.
4630         Value *StartVal = (Part == 0) ? StartV : Iden;
4631         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4632       }
4633     }
4634     return;
4635   }
4636
4637   assert(!Legal->isReductionVariable(P) &&
4638          "reductions should be handled above");
4639
4640   setDebugLocFromInst(Builder, P);
4641
4642   // This PHINode must be an induction variable.
4643   // Make sure that we know about it.
4644   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4645
4646   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4647   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4648
4649   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4650   // which can be found from the original scalar operations.
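  // E.g. (illustrative): if the scalar loop computed
  // "%iv.next = add nuw nsw i64 %iv, 1", the corresponding wide update is
  // currently emitted as a plain add without the nuw/nsw flags.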
4651 switch (II.getKind()) { 4652 case InductionDescriptor::IK_NoInduction: 4653 llvm_unreachable("Unknown induction"); 4654 case InductionDescriptor::IK_IntInduction: 4655 case InductionDescriptor::IK_FpInduction: 4656 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4657 case InductionDescriptor::IK_PtrInduction: { 4658 // Handle the pointer induction variable case. 4659 assert(P->getType()->isPointerTy() && "Unexpected type."); 4660 4661 if (Cost->isScalarAfterVectorization(P, VF)) { 4662 // This is the normalized GEP that starts counting at zero. 4663 Value *PtrInd = 4664 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4665 // Determine the number of scalars we need to generate for each unroll 4666 // iteration. If the instruction is uniform, we only need to generate the 4667 // first lane. Otherwise, we generate all VF values. 4668 unsigned Lanes = 4669 Cost->isUniformAfterVectorization(P, VF) ? 1 : VF.getKnownMinValue(); 4670 for (unsigned Part = 0; Part < UF; ++Part) { 4671 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4672 Constant *Idx = ConstantInt::get(PtrInd->getType(), 4673 Lane + Part * VF.getKnownMinValue()); 4674 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4675 Value *SclrGep = 4676 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4677 SclrGep->setName("next.gep"); 4678 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 4679 } 4680 } 4681 return; 4682 } 4683 assert(isa<SCEVConstant>(II.getStep()) && 4684 "Induction step not a SCEV constant!"); 4685 Type *PhiType = II.getStep()->getType(); 4686 4687 // Build a pointer phi 4688 Value *ScalarStartValue = II.getStartValue(); 4689 Type *ScStValueType = ScalarStartValue->getType(); 4690 PHINode *NewPointerPhi = 4691 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4692 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4693 4694 // A pointer induction, performed by using a gep 4695 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4696 Instruction *InductionLoc = LoopLatch->getTerminator(); 4697 const SCEV *ScalarStep = II.getStep(); 4698 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4699 Value *ScalarStepValue = 4700 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4701 Value *InductionGEP = GetElementPtrInst::Create( 4702 ScStValueType->getPointerElementType(), NewPointerPhi, 4703 Builder.CreateMul( 4704 ScalarStepValue, 4705 ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)), 4706 "ptr.ind", InductionLoc); 4707 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4708 4709 // Create UF many actual address geps that use the pointer 4710 // phi as base and a vectorized version of the step value 4711 // (<step*0, ..., step*N>) as offset. 4712 for (unsigned Part = 0; Part < UF; ++Part) { 4713 SmallVector<Constant *, 8> Indices; 4714 // Create a vector of consecutive numbers from zero to VF. 
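      // E.g. for VF = 4 (illustrative): part 0 gets <0, 1, 2, 3> and part 1
      // gets <4, 5, 6, 7>; each is then multiplied by the splatted step below
      // to form the per-lane pointer offsets.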
4715 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 4716 Indices.push_back( 4717 ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue())); 4718 Constant *StartOffset = ConstantVector::get(Indices); 4719 4720 Value *GEP = Builder.CreateGEP( 4721 ScStValueType->getPointerElementType(), NewPointerPhi, 4722 Builder.CreateMul( 4723 StartOffset, 4724 Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue), 4725 "vector.gep")); 4726 VectorLoopValueMap.setVectorValue(P, Part, GEP); 4727 } 4728 } 4729 } 4730 } 4731 4732 /// A helper function for checking whether an integer division-related 4733 /// instruction may divide by zero (in which case it must be predicated if 4734 /// executed conditionally in the scalar code). 4735 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4736 /// Non-zero divisors that are non compile-time constants will not be 4737 /// converted into multiplication, so we will still end up scalarizing 4738 /// the division, but can do so w/o predication. 4739 static bool mayDivideByZero(Instruction &I) { 4740 assert((I.getOpcode() == Instruction::UDiv || 4741 I.getOpcode() == Instruction::SDiv || 4742 I.getOpcode() == Instruction::URem || 4743 I.getOpcode() == Instruction::SRem) && 4744 "Unexpected instruction"); 4745 Value *Divisor = I.getOperand(1); 4746 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4747 return !CInt || CInt->isZero(); 4748 } 4749 4750 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4751 VPUser &User, 4752 VPTransformState &State) { 4753 switch (I.getOpcode()) { 4754 case Instruction::Call: 4755 case Instruction::Br: 4756 case Instruction::PHI: 4757 case Instruction::GetElementPtr: 4758 case Instruction::Select: 4759 llvm_unreachable("This instruction is handled by a different recipe."); 4760 case Instruction::UDiv: 4761 case Instruction::SDiv: 4762 case Instruction::SRem: 4763 case Instruction::URem: 4764 case Instruction::Add: 4765 case Instruction::FAdd: 4766 case Instruction::Sub: 4767 case Instruction::FSub: 4768 case Instruction::FNeg: 4769 case Instruction::Mul: 4770 case Instruction::FMul: 4771 case Instruction::FDiv: 4772 case Instruction::FRem: 4773 case Instruction::Shl: 4774 case Instruction::LShr: 4775 case Instruction::AShr: 4776 case Instruction::And: 4777 case Instruction::Or: 4778 case Instruction::Xor: { 4779 // Just widen unops and binops. 4780 setDebugLocFromInst(Builder, &I); 4781 4782 for (unsigned Part = 0; Part < UF; ++Part) { 4783 SmallVector<Value *, 2> Ops; 4784 for (VPValue *VPOp : User.operands()) 4785 Ops.push_back(State.get(VPOp, Part)); 4786 4787 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4788 4789 if (auto *VecOp = dyn_cast<Instruction>(V)) 4790 VecOp->copyIRFlags(&I); 4791 4792 // Use this vector value for all users of the original instruction. 4793 State.set(Def, &I, V, Part); 4794 addMetadata(V, &I); 4795 } 4796 4797 break; 4798 } 4799 case Instruction::ICmp: 4800 case Instruction::FCmp: { 4801 // Widen compares. Generate vector compares. 4802 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4803 auto *Cmp = cast<CmpInst>(&I); 4804 setDebugLocFromInst(Builder, Cmp); 4805 for (unsigned Part = 0; Part < UF; ++Part) { 4806 Value *A = State.get(User.getOperand(0), Part); 4807 Value *B = State.get(User.getOperand(1), Part); 4808 Value *C = nullptr; 4809 if (FCmp) { 4810 // Propagate fast math flags. 
4811         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4812         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4813         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4814       } else {
4815         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4816       }
4817       State.set(Def, &I, C, Part);
4818       addMetadata(C, &I);
4819     }
4820
4821     break;
4822   }
4823
4824   case Instruction::ZExt:
4825   case Instruction::SExt:
4826   case Instruction::FPToUI:
4827   case Instruction::FPToSI:
4828   case Instruction::FPExt:
4829   case Instruction::PtrToInt:
4830   case Instruction::IntToPtr:
4831   case Instruction::SIToFP:
4832   case Instruction::UIToFP:
4833   case Instruction::Trunc:
4834   case Instruction::FPTrunc:
4835   case Instruction::BitCast: {
4836     auto *CI = cast<CastInst>(&I);
4837     setDebugLocFromInst(Builder, CI);
4838
4839     // Vectorize casts.
4840     Type *DestTy =
4841         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4842
4843     for (unsigned Part = 0; Part < UF; ++Part) {
4844       Value *A = State.get(User.getOperand(0), Part);
4845       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4846       State.set(Def, &I, Cast, Part);
4847       addMetadata(Cast, &I);
4848     }
4849     break;
4850   }
4851   default:
4852     // This instruction is not vectorized by simple widening.
4853     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4854     llvm_unreachable("Unhandled instruction!");
4855   } // end of switch.
4856 }
4857
4858 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4859                                                VPUser &ArgOperands,
4860                                                VPTransformState &State) {
4861   assert(!isa<DbgInfoIntrinsic>(I) &&
4862          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4863   setDebugLocFromInst(Builder, &I);
4864
4865   Module *M = I.getParent()->getParent()->getParent();
4866   auto *CI = cast<CallInst>(&I);
4867
4868   SmallVector<Type *, 4> Tys;
4869   for (Value *ArgOperand : CI->arg_operands())
4870     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4871
4872   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4873
4874   // Decide whether to use an intrinsic or a library call for the vectorized
4875   // version of the instruction, i.e. whether performing the intrinsic call
4876   // is cheaper than performing the lib call.
4877   bool NeedToScalarize = false;
4878   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4879   bool UseVectorIntrinsic =
4880       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4881   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4882          "Instruction should be scalarized elsewhere.");
4883
4884   for (unsigned Part = 0; Part < UF; ++Part) {
4885     SmallVector<Value *, 4> Args;
4886     for (auto &I : enumerate(ArgOperands.operands())) {
4887       // Some intrinsics have a scalar argument - don't replace it with a
4888       // vector.
4889       Value *Arg;
4890       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4891         Arg = State.get(I.value(), Part);
4892       else
4893         Arg = State.get(I.value(), {0, 0});
4894       Args.push_back(Arg);
4895     }
4896
4897     Function *VectorF;
4898     if (UseVectorIntrinsic) {
4899       // Use vector version of the intrinsic.
4900       Type *TysForDecl[] = {CI->getType()};
4901       if (VF.isVector()) {
4902         assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4903         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4904       }
4905       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4906       assert(VectorF && "Can't retrieve vector intrinsic.");
4907     } else {
4908       // Use vector version of the function call.
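      // E.g. (hypothetical mapping): with a vector library available, a call
      // to sinf at VF = 4 may be resolved by VFDatabase to a 4-lane library
      // routine; the actual symbol depends on the target's vector-function
      // ABI mappings.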
4909 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4910 #ifndef NDEBUG 4911 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4912 "Can't create vector function."); 4913 #endif 4914 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4915 } 4916 SmallVector<OperandBundleDef, 1> OpBundles; 4917 CI->getOperandBundlesAsDefs(OpBundles); 4918 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4919 4920 if (isa<FPMathOperator>(V)) 4921 V->copyFastMathFlags(CI); 4922 4923 State.set(Def, &I, V, Part); 4924 addMetadata(V, &I); 4925 } 4926 } 4927 4928 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 4929 VPUser &Operands, 4930 bool InvariantCond, 4931 VPTransformState &State) { 4932 setDebugLocFromInst(Builder, &I); 4933 4934 // The condition can be loop invariant but still defined inside the 4935 // loop. This means that we can't just use the original 'cond' value. 4936 // We have to take the 'vectorized' value and pick the first lane. 4937 // Instcombine will make this a no-op. 4938 auto *InvarCond = 4939 InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr; 4940 4941 for (unsigned Part = 0; Part < UF; ++Part) { 4942 Value *Cond = 4943 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 4944 Value *Op0 = State.get(Operands.getOperand(1), Part); 4945 Value *Op1 = State.get(Operands.getOperand(2), Part); 4946 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 4947 State.set(VPDef, &I, Sel, Part); 4948 addMetadata(Sel, &I); 4949 } 4950 } 4951 4952 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4953 // We should not collect Scalars more than once per VF. Right now, this 4954 // function is called from collectUniformsAndScalars(), which already does 4955 // this check. Collecting Scalars for VF=1 does not make any sense. 4956 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4957 "This function should not be visited twice for the same VF"); 4958 4959 SmallSetVector<Instruction *, 8> Worklist; 4960 4961 // These sets are used to seed the analysis with pointers used by memory 4962 // accesses that will remain scalar. 4963 SmallSetVector<Instruction *, 8> ScalarPtrs; 4964 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4965 auto *Latch = TheLoop->getLoopLatch(); 4966 4967 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4968 // The pointer operands of loads and stores will be scalar as long as the 4969 // memory access is not a gather or scatter operation. The value operand of a 4970 // store will remain scalar if the store is scalarized. 4971 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4972 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4973 assert(WideningDecision != CM_Unknown && 4974 "Widening decision should be ready at this moment"); 4975 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4976 if (Ptr == Store->getValueOperand()) 4977 return WideningDecision == CM_Scalarize; 4978 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4979 "Ptr is neither a value or pointer operand"); 4980 return WideningDecision != CM_GatherScatter; 4981 }; 4982 4983 // A helper that returns true if the given value is a bitcast or 4984 // getelementptr instruction contained in the loop. 
4985   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4986     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4987             isa<GetElementPtrInst>(V)) &&
4988            !TheLoop->isLoopInvariant(V);
4989   };
4990
4991   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
4992     if (!isa<PHINode>(Ptr) ||
4993         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
4994       return false;
4995     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
4996     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
4997       return false;
4998     return isScalarUse(MemAccess, Ptr);
4999   };
5000
5001   // A helper that evaluates a memory access's use of a pointer. If the
5002   // pointer is actually the pointer induction of a loop, it is inserted
5003   // into the Worklist. If the use will be a scalar use, and the
5004   // pointer is only used by memory accesses, we place the pointer in
5005   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5006   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5007     if (isScalarPtrInduction(MemAccess, Ptr)) {
5008       Worklist.insert(cast<Instruction>(Ptr));
5009       Instruction *Update = cast<Instruction>(
5010           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5011       Worklist.insert(Update);
5012       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5013                         << "\n");
5014       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5015                         << "\n");
5016       return;
5017     }
5018     // We only care about bitcast and getelementptr instructions contained in
5019     // the loop.
5020     if (!isLoopVaryingBitCastOrGEP(Ptr))
5021       return;
5022
5023     // If the pointer has already been identified as scalar (e.g., if it was
5024     // also identified as uniform), there's nothing to do.
5025     auto *I = cast<Instruction>(Ptr);
5026     if (Worklist.count(I))
5027       return;
5028
5029     // If the use of the pointer will be a scalar use, and all users of the
5030     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5031     // place the pointer in PossibleNonScalarPtrs.
5032     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5033           return isa<LoadInst>(U) || isa<StoreInst>(U);
5034         }))
5035       ScalarPtrs.insert(I);
5036     else
5037       PossibleNonScalarPtrs.insert(I);
5038   };
5039
5040   // We seed the scalars analysis with two classes of instructions: (1)
5041   // instructions marked uniform-after-vectorization and (2) bitcast,
5042   // getelementptr and (pointer) phi instructions used by memory accesses
5043   // requiring a scalar use.
5044   //
5045   // (1) Add to the worklist all instructions that have been identified as
5046   // uniform-after-vectorization.
5047   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5048
5049   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5050   // memory accesses requiring a scalar use. The pointer operands of loads and
5051   // stores will be scalar as long as the memory access is not a gather or
5052   // scatter operation. The value operand of a store will remain scalar if the
5053   // store is scalarized.
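  // For instance (illustrative): in "a[i] = b[i]", the getelementptrs feeding
  // the load and the store are seeded here as long as neither access becomes
  // a gather/scatter, since widened consecutive accesses still consume a
  // scalar pointer.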
5054 for (auto *BB : TheLoop->blocks()) 5055 for (auto &I : *BB) { 5056 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5057 evaluatePtrUse(Load, Load->getPointerOperand()); 5058 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5059 evaluatePtrUse(Store, Store->getPointerOperand()); 5060 evaluatePtrUse(Store, Store->getValueOperand()); 5061 } 5062 } 5063 for (auto *I : ScalarPtrs) 5064 if (!PossibleNonScalarPtrs.count(I)) { 5065 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5066 Worklist.insert(I); 5067 } 5068 5069 // Insert the forced scalars. 5070 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5071 // induction variable when the PHI user is scalarized. 5072 auto ForcedScalar = ForcedScalars.find(VF); 5073 if (ForcedScalar != ForcedScalars.end()) 5074 for (auto *I : ForcedScalar->second) 5075 Worklist.insert(I); 5076 5077 // Expand the worklist by looking through any bitcasts and getelementptr 5078 // instructions we've already identified as scalar. This is similar to the 5079 // expansion step in collectLoopUniforms(); however, here we're only 5080 // expanding to include additional bitcasts and getelementptr instructions. 5081 unsigned Idx = 0; 5082 while (Idx != Worklist.size()) { 5083 Instruction *Dst = Worklist[Idx++]; 5084 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5085 continue; 5086 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5087 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5088 auto *J = cast<Instruction>(U); 5089 return !TheLoop->contains(J) || Worklist.count(J) || 5090 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5091 isScalarUse(J, Src)); 5092 })) { 5093 Worklist.insert(Src); 5094 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5095 } 5096 } 5097 5098 // An induction variable will remain scalar if all users of the induction 5099 // variable and induction variable update remain scalar. 5100 for (auto &Induction : Legal->getInductionVars()) { 5101 auto *Ind = Induction.first; 5102 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5103 5104 // If tail-folding is applied, the primary induction variable will be used 5105 // to feed a vector compare. 5106 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5107 continue; 5108 5109 // Determine if all users of the induction variable are scalar after 5110 // vectorization. 5111 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5112 auto *I = cast<Instruction>(U); 5113 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5114 }); 5115 if (!ScalarInd) 5116 continue; 5117 5118 // Determine if all users of the induction variable update instruction are 5119 // scalar after vectorization. 5120 auto ScalarIndUpdate = 5121 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5122 auto *I = cast<Instruction>(U); 5123 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5124 }); 5125 if (!ScalarIndUpdate) 5126 continue; 5127 5128 // The induction variable and its update instruction will remain scalar. 
5129     Worklist.insert(Ind);
5130     Worklist.insert(IndUpdate);
5131     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5132     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5133                       << "\n");
5134   }
5135
5136   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5137 }
5138
5139 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5140                                                          ElementCount VF) {
5141   if (!blockNeedsPredication(I->getParent()))
5142     return false;
5143   switch (I->getOpcode()) {
5144   default:
5145     break;
5146   case Instruction::Load:
5147   case Instruction::Store: {
5148     if (!Legal->isMaskRequired(I))
5149       return false;
5150     auto *Ptr = getLoadStorePointerOperand(I);
5151     auto *Ty = getMemInstValueType(I);
5152     // We have already decided how to vectorize this instruction, get that
5153     // result.
5154     if (VF.isVector()) {
5155       InstWidening WideningDecision = getWideningDecision(I, VF);
5156       assert(WideningDecision != CM_Unknown &&
5157              "Widening decision should be ready at this moment");
5158       return WideningDecision == CM_Scalarize;
5159     }
5160     const Align Alignment = getLoadStoreAlignment(I);
5161     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5162                                 isLegalMaskedGather(Ty, Alignment))
5163                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5164                                 isLegalMaskedScatter(Ty, Alignment));
5165   }
5166   case Instruction::UDiv:
5167   case Instruction::SDiv:
5168   case Instruction::SRem:
5169   case Instruction::URem:
5170     return mayDivideByZero(*I);
5171   }
5172   return false;
5173 }
5174
5175 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5176     Instruction *I, ElementCount VF) {
5177   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5178   assert(getWideningDecision(I, VF) == CM_Unknown &&
5179          "Decision should not be set yet.");
5180   auto *Group = getInterleavedAccessGroup(I);
5181   assert(Group && "Must have a group.");
5182
5183   // If the instruction's allocated size doesn't equal its type size, it
5184   // requires padding and will be scalarized.
5185   auto &DL = I->getModule()->getDataLayout();
5186   auto *ScalarTy = getMemInstValueType(I);
5187   if (hasIrregularType(ScalarTy, DL, VF))
5188     return false;
5189
5190   // Check if masking is required.
5191   // A Group may need masking for one of two reasons: it resides in a block that
5192   // needs predication, or it was decided to use masking to deal with gaps.
5193   bool PredicatedAccessRequiresMasking =
5194       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5195   bool AccessWithGapsRequiresMasking =
5196       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5197   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5198     return true;
5199
5200   // If masked interleaving is required, we expect that the user/target had
5201   // enabled it, because otherwise it either wouldn't have been created or
5202   // it should have been invalidated by the CostModel.
5203   assert(useMaskedInterleavedAccesses(TTI) &&
5204          "Masked interleave-groups for predicated accesses are not enabled.");
5205
5206   auto *Ty = getMemInstValueType(I);
5207   const Align Alignment = getLoadStoreAlignment(I);
5208   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5209                           : TTI.isLegalMaskedStore(Ty, Alignment);
5210 }
5211
5212 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5213     Instruction *I, ElementCount VF) {
5214   // Get and ensure we have a valid memory instruction.
5215   LoadInst *LI = dyn_cast<LoadInst>(I);
5216   StoreInst *SI = dyn_cast<StoreInst>(I);
5217   assert((LI || SI) && "Invalid memory instruction");
5218
5219   auto *Ptr = getLoadStorePointerOperand(I);
5220
5221   // First of all, to be widened the pointer must be consecutive.
5222   if (!Legal->isConsecutivePtr(Ptr))
5223     return false;
5224
5225   // If the instruction is a store located in a predicated block, it will be
5226   // scalarized.
5227   if (isScalarWithPredication(I))
5228     return false;
5229
5230   // If the instruction's allocated size doesn't equal its type size, it
5231   // requires padding and will be scalarized.
5232   auto &DL = I->getModule()->getDataLayout();
5233   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5234   if (hasIrregularType(ScalarTy, DL, VF))
5235     return false;
5236
5237   return true;
5238 }
5239
5240 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5241   // We should not collect Uniforms more than once per VF. Right now,
5242   // this function is called from collectUniformsAndScalars(), which
5243   // already does this check. Collecting Uniforms for VF=1 does not make any
5244   // sense.
5245
5246   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5247          "This function should not be visited twice for the same VF");
5248
5249   // Initialize Uniforms[VF] up front: even if we find no uniform values, we
5250   // won't analyze this VF again, and Uniforms.count(VF) will return 1.
5251   Uniforms[VF].clear();
5252
5253   // We now know that the loop is vectorizable!
5254   // Collect instructions inside the loop that will remain uniform after
5255   // vectorization.
5256
5257   // Global values, params and instructions outside of current loop are out of
5258   // scope.
5259   auto isOutOfScope = [&](Value *V) -> bool {
5260     Instruction *I = dyn_cast<Instruction>(V);
5261     return (!I || !TheLoop->contains(I));
5262   };
5263
5264   SetVector<Instruction *> Worklist;
5265   BasicBlock *Latch = TheLoop->getLoopLatch();
5266
5267   // Instructions that are scalar with predication must not be considered
5268   // uniform after vectorization, because that would create an erroneous
5269   // replicating region where only a single instance out of VF should be formed.
5270   // TODO: optimize such seldom cases if found important, see PR40816.
5271   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5272     if (isOutOfScope(I)) {
5273       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5274                         << *I << "\n");
5275       return;
5276     }
5277     if (isScalarWithPredication(I, VF)) {
5278       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5279                         << *I << "\n");
5280       return;
5281     }
5282     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5283     Worklist.insert(I);
5284   };
5285
5286   // Start with the conditional branch. If the branch condition is an
5287   // instruction contained in the loop that is only used by the branch, it is
5288   // uniform.
5289   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5290   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5291     addToWorklistIfAllowed(Cmp);
5292
5293   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5294     InstWidening WideningDecision = getWideningDecision(I, VF);
5295     assert(WideningDecision != CM_Unknown &&
5296            "Widening decision should be ready at this moment");
5297
5298     // A uniform memory op is itself uniform. We exclude uniform stores
5299     // here as they demand the last lane, not the first one.
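    // E.g. (illustrative): a load "x = *p" from a loop-invariant address
    // yields the same value in every lane, so lane 0 suffices; a store
    // "*p = i" must reflect the final iteration, i.e. the last lane, and is
    // therefore excluded here.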
5300 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5301 assert(WideningDecision == CM_Scalarize);
5302 return true;
5303 }
5304
5305 return (WideningDecision == CM_Widen ||
5306 WideningDecision == CM_Widen_Reverse ||
5307 WideningDecision == CM_Interleave);
5308 };
5309
5310
5311 // Returns true if Ptr is the pointer operand of a memory access instruction
5312 // I, and I is known to not require scalarization.
5313 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5314 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5315 };
5316
5317 // Holds a list of values which are known to have at least one uniform use.
5318 // Note that there may be other uses which aren't uniform. A "uniform use"
5319 // here is something which only demands lane 0 of the unrolled iterations;
5320 // it does not imply that all lanes produce the same value (i.e. this is not
5321 // the usual meaning of uniform).
5322 SmallPtrSet<Value *, 8> HasUniformUse;
5323
5324 // Scan the loop for instructions which are either a) known to have only
5325 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5326 for (auto *BB : TheLoop->blocks())
5327 for (auto &I : *BB) {
5328 // If there's no pointer operand, there's nothing to do.
5329 auto *Ptr = getLoadStorePointerOperand(&I);
5330 if (!Ptr)
5331 continue;
5332
5333 // A uniform memory op is itself uniform. We exclude uniform stores
5334 // here as they demand the last lane, not the first one.
5335 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5336 addToWorklistIfAllowed(&I);
5337
5338 if (isUniformDecision(&I, VF)) {
5339 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5340 HasUniformUse.insert(Ptr);
5341 }
5342 }
5343
5344 // Add to the worklist any operands which have *only* uniform (i.e. lane 0
5345 // demanding) users. Since loops are assumed to be in LCSSA form, this
5346 // disallows uses outside the loop as well.
5347 for (auto *V : HasUniformUse) {
5348 if (isOutOfScope(V))
5349 continue;
5350 auto *I = cast<Instruction>(V);
5351 auto UsersAreMemAccesses =
5352 llvm::all_of(I->users(), [&](User *U) -> bool {
5353 return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5354 });
5355 if (UsersAreMemAccesses)
5356 addToWorklistIfAllowed(I);
5357 }
5358
5359 // Expand Worklist in topological order: whenever a new instruction
5360 // is added, its users should already be inside Worklist. This ensures that
5361 // a uniform instruction will only be used by other uniform instructions.
5362 unsigned idx = 0;
5363 while (idx != Worklist.size()) {
5364 Instruction *I = Worklist[idx++];
5365
5366 for (auto OV : I->operand_values()) {
5367 // isOutOfScope operands cannot be uniform instructions.
5368 if (isOutOfScope(OV))
5369 continue;
5370 // First-order recurrence phis should typically be considered
5371 // non-uniform.
5372 auto *OP = dyn_cast<PHINode>(OV);
5373 if (OP && Legal->isFirstOrderRecurrence(OP))
5374 continue;
5375 // If all the users of the operand are uniform, then add the
5376 // operand into the uniform worklist.
5377 auto *OI = cast<Instruction>(OV);
5378 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5379 auto *J = cast<Instruction>(U);
5380 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5381 }))
5382 addToWorklistIfAllowed(OI);
5383 }
5384 }
5385
5386 // For an instruction to be added into Worklist above, all its users inside
5387 // the loop should also be in Worklist. However, this condition cannot be
5388 // true for phi nodes that form a cyclic dependence. We must process phi
5389 // nodes separately. An induction variable will remain uniform if all users
5390 // of the induction variable and induction variable update remain uniform.
5391 // The code below handles both pointer and non-pointer induction variables.
5392 for (auto &Induction : Legal->getInductionVars()) {
5393 auto *Ind = Induction.first;
5394 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5395
5396 // Determine if all users of the induction variable are uniform after
5397 // vectorization.
5398 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5399 auto *I = cast<Instruction>(U);
5400 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5401 isVectorizedMemAccessUse(I, Ind);
5402 });
5403 if (!UniformInd)
5404 continue;
5405
5406 // Determine if all users of the induction variable update instruction are
5407 // uniform after vectorization.
5408 auto UniformIndUpdate =
5409 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5410 auto *I = cast<Instruction>(U);
5411 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5412 isVectorizedMemAccessUse(I, IndUpdate);
5413 });
5414 if (!UniformIndUpdate)
5415 continue;
5416
5417 // The induction variable and its update instruction will remain uniform.
5418 addToWorklistIfAllowed(Ind);
5419 addToWorklistIfAllowed(IndUpdate);
5420 }
5421
5422 Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5423 }
5424
5425 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5426 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5427
5428 if (Legal->getRuntimePointerChecking()->Need) {
5429 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5430 "runtime pointer checks needed. Enable vectorization of this "
5431 "loop with '#pragma clang loop vectorize(enable)' when "
5432 "compiling with -Os/-Oz",
5433 "CantVersionLoopWithOptForSize", ORE, TheLoop);
5434 return true;
5435 }
5436
5437 if (!PSE.getUnionPredicate().getPredicates().empty()) {
5438 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5439 "runtime SCEV checks needed. Enable vectorization of this "
5440 "loop with '#pragma clang loop vectorize(enable)' when "
5441 "compiling with -Os/-Oz",
5442 "CantVersionLoopWithOptForSize", ORE, TheLoop);
5443 return true;
5444 }
5445
5446 // FIXME: Avoid specializing for stride==1 instead of bailing out.
5447 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5448 reportVectorizationFailure("Runtime stride check for small trip count",
5449 "runtime stride == 1 checks needed. Enable vectorization of "
5450 "this loop without such check by compiling with -Os/-Oz",
5451 "CantVersionLoopWithOptForSize", ORE, TheLoop);
5452 return true;
5453 }
5454
5455 return false;
5456 }
5457
5458 Optional<ElementCount>
5459 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5460 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5461 // TODO: It may be useful to do this, since it's still likely to be
5462 // dynamically uniform if the target can skip.
5463 reportVectorizationFailure(
5464 "Not inserting runtime ptr check for divergent target",
5465 "runtime pointer checks needed. Not enabled for divergent target",
5466 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5467 return None;
5468 }
5469
5470 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5471 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5472 if (TC == 1) {
5473 reportVectorizationFailure("Single iteration (non) loop",
5474 "loop trip count is one, irrelevant for vectorization",
5475 "SingleIterationLoop", ORE, TheLoop);
5476 return None;
5477 }
5478
5479 ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5480
5481 switch (ScalarEpilogueStatus) {
5482 case CM_ScalarEpilogueAllowed:
5483 return MaxVF;
5484 case CM_ScalarEpilogueNotAllowedUsePredicate:
5485 LLVM_FALLTHROUGH;
5486 case CM_ScalarEpilogueNotNeededUsePredicate:
5487 LLVM_DEBUG(
5488 dbgs() << "LV: vector predicate hint/switch found.\n"
5489 << "LV: Not allowing scalar epilogue, creating predicated "
5490 << "vector loop.\n");
5491 break;
5492 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5493 // fallthrough as a special case of OptForSize
5494 case CM_ScalarEpilogueNotAllowedOptSize:
5495 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5496 LLVM_DEBUG(
5497 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5498 else
5499 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5500 << "count.\n");
5501
5502 // Bail if runtime checks are required, which are not good when optimising
5503 // for size.
5504 if (runtimeChecksRequired())
5505 return None;
5506
5507 break;
5508 }
5509
5510 // The only loops we can vectorize without a scalar epilogue are loops with
5511 // a bottom-test and a single exiting block. We'd have to handle the fact
5512 // that not every instruction executes on the last iteration. This will
5513 // require a lane mask which varies through the vector loop body. (TODO)
5514 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5515 // If there was a tail-folding hint/switch, but we can't fold the tail by
5516 // masking, fall back to a vectorization with a scalar epilogue.
5517 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5518 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5519 "scalar epilogue instead.\n");
5520 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5521 return MaxVF;
5522 }
5523 return None;
5524 }
5525
5526 // Now try tail folding.
5527
5528 // Invalidate interleave groups that require an epilogue if we can't mask
5529 // the interleave-group.
5530 if (!useMaskedInterleavedAccesses(TTI)) {
5531 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5532 "No decisions should have been taken at this point");
5533 // Note: There is no need to invalidate any cost modeling decisions here, as
5534 // none were taken so far.
5535 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5536 }
5537
5538 assert(!MaxVF.isScalable() &&
5539 "Scalable vectors do not yet support tail folding");
5540 assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5541 "MaxVF must be a power of 2");
5542 unsigned MaxVFtimesIC =
5543 UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
5544 // Avoid tail folding if the trip count is known to be a multiple of any VF we
5545 // chose.
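// For example, with a known trip count of 64, MaxVF = 8 and UserIC = 2,
// MaxVFtimesIC is 16 and the exit count is evenly divisible by it
// (64 % 16 == 0), so no tail remains for any VF and IC that may be chosen.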
5546 ScalarEvolution *SE = PSE.getSE();
5547 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5548 const SCEV *ExitCount = SE->getAddExpr(
5549 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5550 const SCEV *Rem = SE->getURemExpr(
5551 ExitCount, SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5552 if (Rem->isZero()) {
5553 // Accept MaxVF if we do not have a tail.
5554 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5555 return MaxVF;
5556 }
5557
5558 // If we don't know the precise trip count, or if the trip count that we
5559 // found modulo the vectorization factor is not zero, try to fold the tail
5560 // by masking.
5561 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5562 if (Legal->prepareToFoldTailByMasking()) {
5563 FoldTailByMasking = true;
5564 return MaxVF;
5565 }
5566
5567 // If there was a tail-folding hint/switch, but we can't fold the tail by
5568 // masking, fall back to a vectorization with a scalar epilogue.
5569 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5570 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5571 "scalar epilogue instead.\n");
5572 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5573 return MaxVF;
5574 }
5575
5576 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5577 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5578 return None;
5579 }
5580
5581 if (TC == 0) {
5582 reportVectorizationFailure(
5583 "Unable to calculate the loop count due to complex control flow",
5584 "unable to calculate the loop count due to complex control flow",
5585 "UnknownLoopCountComplexCFG", ORE, TheLoop);
5586 return None;
5587 }
5588
5589 reportVectorizationFailure(
5590 "Cannot optimize for size and vectorize at the same time.",
5591 "cannot optimize for size and vectorize at the same time. "
5592 "Enable vectorization of this loop with '#pragma clang loop "
5593 "vectorize(enable)' when compiling with -Os/-Oz",
5594 "NoTailLoopWithOptForSize", ORE, TheLoop);
5595 return None;
5596 }
5597
5598 ElementCount
5599 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5600 ElementCount UserVF) {
5601 bool IgnoreScalableUserVF = UserVF.isScalable() &&
5602 !TTI.supportsScalableVectors() &&
5603 !ForceTargetSupportsScalableVectors;
5604 if (IgnoreScalableUserVF) {
5605 LLVM_DEBUG(
5606 dbgs() << "LV: Ignoring VF=" << UserVF
5607 << " because target does not support scalable vectors.\n");
5608 ORE->emit([&]() {
5609 return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5610 TheLoop->getStartLoc(),
5611 TheLoop->getHeader())
5612 << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5613 << " because target does not support scalable vectors.";
5614 });
5615 }
5616
5617 // Beyond this point two scenarios are handled. If UserVF isn't specified
5618 // then a suitable VF is chosen. If UserVF is specified and there are
5619 // dependencies, check if it's legal. However, if a UserVF is specified and
5620 // there are no dependencies, then there's nothing to do.
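// For example, a loop annotated with '#pragma clang loop vectorize_width(8)'
// and no dependence-distance limit takes the early return below with VF 8
// unchanged.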
5621 if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
5622 Legal->isSafeForAnyVectorWidth())
5623 return UserVF;
5624
5625 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5626 unsigned SmallestType, WidestType;
5627 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5628 unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5629
5630 // Get the maximum safe dependence distance in bits computed by LAA.
5631 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5632 // the memory access that is most restrictive (involved in the smallest
5633 // dependence distance).
5634 unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
5635
5636 // If the user vectorization factor is legally unsafe, clamp it to a safe
5637 // value. Otherwise, return as is.
5638 if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5639 unsigned MaxSafeElements =
5640 PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5641 ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5642
5643 if (UserVF.isScalable()) {
5644 Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5645
5646 // Scale VF by vscale before checking if it's safe.
5647 MaxSafeVF = ElementCount::getScalable(
5648 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5649
5650 if (MaxSafeVF.isZero()) {
5651 // The dependence distance is too small to use scalable vectors;
5652 // fall back on fixed-width vectorization.
5653 LLVM_DEBUG(
5654 dbgs()
5655 << "LV: Max legal vector width too small, scalable vectorization "
5656 "unfeasible. Using fixed-width vectorization instead.\n");
5657 ORE->emit([&]() {
5658 return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5659 TheLoop->getStartLoc(),
5660 TheLoop->getHeader())
5661 << "Max legal vector width too small, scalable vectorization "
5662 << "unfeasible. Using fixed-width vectorization instead.";
5663 });
5664 return computeFeasibleMaxVF(
5665 ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5666 }
5667 }
5668
5669 LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5670
5671 if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5672 return UserVF;
5673
5674 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5675 << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5676 << ".\n");
5677 ORE->emit([&]() {
5678 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5679 TheLoop->getStartLoc(),
5680 TheLoop->getHeader())
5681 << "User-specified vectorization factor "
5682 << ore::NV("UserVectorizationFactor", UserVF)
5683 << " is unsafe, clamping to maximum safe vectorization factor "
5684 << ore::NV("VectorizationFactor", MaxSafeVF);
5685 });
5686 return MaxSafeVF;
5687 }
5688
5689 WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5690
5691 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5692 // Note that both WidestRegister and WidestType may not be powers of 2.
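// For example, WidestRegister = 128 and WidestType = 32 yield
// PowerOf2Floor(4) = 4 lanes below, while a dependence-clamped
// WidestRegister = 96 yields PowerOf2Floor(3) = 2 lanes.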
5693 unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType); 5694 5695 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5696 << " / " << WidestType << " bits.\n"); 5697 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5698 << WidestRegister << " bits.\n"); 5699 5700 assert(MaxVectorSize <= WidestRegister && 5701 "Did not expect to pack so many elements" 5702 " into one vector!"); 5703 if (MaxVectorSize == 0) { 5704 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5705 MaxVectorSize = 1; 5706 return ElementCount::getFixed(MaxVectorSize); 5707 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 5708 isPowerOf2_32(ConstTripCount)) { 5709 // We need to clamp the VF to be the ConstTripCount. There is no point in 5710 // choosing a higher viable VF as done in the loop below. 5711 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5712 << ConstTripCount << "\n"); 5713 MaxVectorSize = ConstTripCount; 5714 return ElementCount::getFixed(MaxVectorSize); 5715 } 5716 5717 unsigned MaxVF = MaxVectorSize; 5718 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) || 5719 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5720 // Collect all viable vectorization factors larger than the default MaxVF 5721 // (i.e. MaxVectorSize). 5722 SmallVector<ElementCount, 8> VFs; 5723 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5724 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 5725 VFs.push_back(ElementCount::getFixed(VS)); 5726 5727 // For each VF calculate its register usage. 5728 auto RUs = calculateRegisterUsage(VFs); 5729 5730 // Select the largest VF which doesn't require more registers than existing 5731 // ones. 5732 for (int i = RUs.size() - 1; i >= 0; --i) { 5733 bool Selected = true; 5734 for (auto& pair : RUs[i].MaxLocalUsers) { 5735 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5736 if (pair.second > TargetNumRegisters) 5737 Selected = false; 5738 } 5739 if (Selected) { 5740 MaxVF = VFs[i].getKnownMinValue(); 5741 break; 5742 } 5743 } 5744 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 5745 if (MaxVF < MinVF) { 5746 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5747 << ") with target's minimum: " << MinVF << '\n'); 5748 MaxVF = MinVF; 5749 } 5750 } 5751 } 5752 return ElementCount::getFixed(MaxVF); 5753 } 5754 5755 VectorizationFactor 5756 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) { 5757 // FIXME: This can be fixed for scalable vectors later, because at this stage 5758 // the LoopVectorizer will only consider vectorizing a loop with scalable 5759 // vectors when the loop has a hint to enable vectorization for a given VF. 5760 assert(!MaxVF.isScalable() && "scalable vectors not yet supported"); 5761 5762 float Cost = expectedCost(ElementCount::getFixed(1)).first; 5763 const float ScalarCost = Cost; 5764 unsigned Width = 1; 5765 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5766 5767 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5768 if (ForceVectorization && MaxVF.isVector()) { 5769 // Ignore scalar width, because the user explicitly wants vectorization. 5770 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5771 // evaluation. 
5772 Cost = std::numeric_limits<float>::max();
5773 }
5774
5775 for (unsigned i = 2; i <= MaxVF.getFixedValue(); i *= 2) {
5776 // Notice that the vector loop needs to be executed fewer times, so
5777 // we need to divide the cost of the vector loop by the width of
5778 // the vector elements.
5779 VectorizationCostTy C = expectedCost(ElementCount::getFixed(i));
5780 float VectorCost = C.first / (float)i;
5781 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5782 << " costs: " << (int)VectorCost << ".\n");
5783 if (!C.second && !ForceVectorization) {
5784 LLVM_DEBUG(
5785 dbgs() << "LV: Not considering vector loop of width " << i
5786 << " because it will not generate any vector instructions.\n");
5787 continue;
5788 }
5789
5790 // If profitable, add it to the ProfitableVFs list.
5791 if (VectorCost < ScalarCost) {
5792 ProfitableVFs.push_back(VectorizationFactor(
5793 {ElementCount::getFixed(i), (unsigned)VectorCost}));
5794 }
5795
5796 if (VectorCost < Cost) {
5797 Cost = VectorCost;
5798 Width = i;
5799 }
5800 }
5801
5802 if (!EnableCondStoresVectorization && NumPredStores) {
5803 reportVectorizationFailure("There are conditional stores.",
5804 "store that is conditionally executed prevents vectorization",
5805 "ConditionalStore", ORE, TheLoop);
5806 Width = 1;
5807 Cost = ScalarCost;
5808 }
5809
5810 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5811 << "LV: Vectorization seems not to be beneficial, "
5812 << "but was forced by a user.\n");
5813 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5814 VectorizationFactor Factor = {ElementCount::getFixed(Width),
5815 (unsigned)(Width * Cost)};
5816 return Factor;
5817 }
5818
5819 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5820 const Loop &L, ElementCount VF) const {
5821 // Cross-iteration phis such as reductions need special handling and are
5822 // currently unsupported.
5823 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5824 return Legal->isFirstOrderRecurrence(&Phi) ||
5825 Legal->isReductionVariable(&Phi);
5826 }))
5827 return false;
5828
5829 // Phis with uses outside of the loop require special handling and are
5830 // currently unsupported.
5831 for (auto &Entry : Legal->getInductionVars()) {
5832 // Look for uses of the value of the induction at the last iteration.
5833 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5834 for (User *U : PostInc->users())
5835 if (!L.contains(cast<Instruction>(U)))
5836 return false;
5837 // Look for uses of the penultimate value of the induction.
5838 for (User *U : Entry.first->users())
5839 if (!L.contains(cast<Instruction>(U)))
5840 return false;
5841 }
5842
5843 // Induction variables that are widened require special handling that is
5844 // currently not supported.
5845 if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5846 return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5847 this->isProfitableToScalarize(Entry.first, VF));
5848 }))
5849 return false;
5850
5851 return true;
5852 }
5853
5854 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5855 const ElementCount VF) const {
5856 // FIXME: We need a much better cost-model to take different parameters such
5857 // as register pressure, code size increase and cost of extra branches into
5858 // account. For now we apply a very crude heuristic and only consider loops
5859 // with vectorization factors larger than a certain value.
5860 // We also consider epilogue vectorization unprofitable for targets that don't
5861 // consider interleaving beneficial (e.g. MVE).
5862 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5863 return false;
5864 if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5865 return true;
5866 return false;
5867 }
5868
5869 VectorizationFactor
5870 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5871 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5872 VectorizationFactor Result = VectorizationFactor::Disabled();
5873 if (!EnableEpilogueVectorization) {
5874 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5875 return Result;
5876 }
5877
5878 if (!isScalarEpilogueAllowed()) {
5879 LLVM_DEBUG(
5880 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5881 "allowed.\n";);
5882 return Result;
5883 }
5884
5885 // FIXME: This can be fixed for scalable vectors later, because at this stage
5886 // the LoopVectorizer will only consider vectorizing a loop with scalable
5887 // vectors when the loop has a hint to enable vectorization for a given VF.
5888 if (MainLoopVF.isScalable()) {
5889 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
5890 "yet supported.\n");
5891 return Result;
5892 }
5893
5894 // Not really a cost consideration, but check for unsupported cases here to
5895 // simplify the logic.
5896 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5897 LLVM_DEBUG(
5898 dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5899 "not a supported candidate.\n";);
5900 return Result;
5901 }
5902
5903 if (EpilogueVectorizationForceVF > 1) {
5904 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5905 if (LVP.hasPlanWithVFs(
5906 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
5907 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
5908 else {
5909 LLVM_DEBUG(
5910 dbgs()
5911 << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5912 return Result;
5913 }
5914 }
5915
5916 if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5917 TheLoop->getHeader()->getParent()->hasMinSize()) {
5918 LLVM_DEBUG(
5919 dbgs()
5920 << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5921 return Result;
5922 }
5923
5924 if (!isEpilogueVectorizationProfitable(MainLoopVF))
5925 return Result;
5926
5927 for (auto &NextVF : ProfitableVFs)
5928 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
5929 (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
5930 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
5931 Result = NextVF;
5932
5933 if (Result != VectorizationFactor::Disabled())
5934 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5935 << Result.Width.getFixedValue() << "\n";);
5936 return Result;
5937 }
5938
5939 std::pair<unsigned, unsigned>
5940 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5941 unsigned MinWidth = -1U;
5942 unsigned MaxWidth = 8;
5943 const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5944
5945 // For each block.
5946 for (BasicBlock *BB : TheLoop->blocks()) {
5947 // For each instruction in the loop.
5948 for (Instruction &I : BB->instructionsWithoutDebug()) {
5949 Type *T = I.getType();
5950
5951 // Skip ignored values.
5952 if (ValuesToIgnore.count(&I))
5953 continue;
5954
5955 // Only examine Loads, Stores and PHINodes.
5956 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5957 continue;
5958
5959 // Examine PHI nodes that are reduction variables. Update the type to
5960 // account for the recurrence type.
5961 if (auto *PN = dyn_cast<PHINode>(&I)) {
5962 if (!Legal->isReductionVariable(PN))
5963 continue;
5964 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5965 T = RdxDesc.getRecurrenceType();
5966 }
5967
5968 // Examine the stored values.
5969 if (auto *ST = dyn_cast<StoreInst>(&I))
5970 T = ST->getValueOperand()->getType();
5971
5972 // Ignore loaded pointer types and stored pointer types that are not
5973 // vectorizable.
5974 //
5975 // FIXME: The check here attempts to predict whether a load or store will
5976 // be vectorized. We only know this for certain after a VF has
5977 // been selected. Here, we assume that if an access can be
5978 // vectorized, it will be. We should also look at extending this
5979 // optimization to non-pointer types.
5980 //
5981 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5982 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5983 continue;
5984
5985 MinWidth = std::min(MinWidth,
5986 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5987 MaxWidth = std::max(MaxWidth,
5988 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5989 }
5990 }
5991
5992 return {MinWidth, MaxWidth};
5993 }
5994
5995 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5996 unsigned LoopCost) {
5997 // -- The interleave heuristics --
5998 // We interleave the loop in order to expose ILP and reduce the loop overhead.
5999 // There are many micro-architectural considerations that we can't predict
6000 // at this level. For example, frontend pressure (on decode or fetch) due to
6001 // code size, or the number and capabilities of the execution ports.
6002 //
6003 // We use the following heuristics to select the interleave count:
6004 // 1. If the code has reductions, then we interleave to break the cross
6005 // iteration dependency.
6006 // 2. If the loop is really small, then we interleave to reduce the loop
6007 // overhead.
6008 // 3. We don't interleave if we think that we will spill registers to memory
6009 // due to the increased register pressure.
6010
6011 if (!isScalarEpilogueAllowed())
6012 return 1;
6013
6014 // We already used the maximum safe dependence distance to limit the VF; do not interleave further.
6015 if (Legal->getMaxSafeDepDistBytes() != -1U)
6016 return 1;
6017
6018 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6019 const bool HasReductions = !Legal->getReductionVars().empty();
6020 // Do not interleave loops with a relatively small known or estimated trip
6021 // count. But we will interleave when InterleaveSmallLoopScalarReduction is
6022 // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6023 // because with the above conditions interleaving can expose ILP and break
6024 // cross-iteration dependences for reductions.
6025 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6026 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6027 return 1;
6028
6029 RegisterUsage R = calculateRegisterUsage({VF})[0];
6030 // We divide by these constants so assume that we have at least one
6031 // instruction that uses at least one register.
6032 for (auto& pair : R.MaxLocalUsers) {
6033 pair.second = std::max(pair.second, 1U);
6034 }
6035
6036 // We calculate the interleave count using the following formula.
6037 // Subtract the number of loop invariants from the number of available
6038 // registers. These registers are used by all of the interleaved instances.
6039 // Next, divide the remaining registers by the number of registers that is
6040 // required by the loop, in order to estimate how many parallel instances
6041 // fit without causing spills. All of this is rounded down if necessary to be
6042 // a power of two. We want a power-of-two interleave count to simplify any
6043 // addressing operations or alignment considerations.
6044 // We also want power-of-two interleave counts to ensure that the induction
6045 // variable of the vector loop wraps to zero, when tail is folded by masking;
6046 // this currently happens when OptForSize, in which case IC is set to 1 above.
6047 unsigned IC = UINT_MAX;
6048
6049 for (auto& pair : R.MaxLocalUsers) {
6050 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6051 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6052 << " registers of "
6053 << TTI.getRegisterClassName(pair.first) << " register class\n");
6054 if (VF.isScalar()) {
6055 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6056 TargetNumRegisters = ForceTargetNumScalarRegs;
6057 } else {
6058 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6059 TargetNumRegisters = ForceTargetNumVectorRegs;
6060 }
6061 unsigned MaxLocalUsers = pair.second;
6062 unsigned LoopInvariantRegs = 0;
6063 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6064 LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6065
6066 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6067 // Don't count the induction variable as interleaved.
6068 if (EnableIndVarRegisterHeur) {
6069 TmpIC =
6070 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6071 std::max(1U, (MaxLocalUsers - 1)));
6072 }
6073
6074 IC = std::min(IC, TmpIC);
6075 }
6076
6077 // Clamp the interleave ranges to reasonable counts.
6078 unsigned MaxInterleaveCount =
6079 TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6080
6081 // Check if the user has overridden the max.
6082 if (VF.isScalar()) {
6083 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6084 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6085 } else {
6086 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6087 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6088 }
6089
6090 // If the trip count is a known or estimated compile-time constant, limit the
6091 // interleave count to at most the trip count divided by VF, provided it
6092 // is at least 1.
6093 //
6094 // For scalable vectors we can't know if interleaving is beneficial. It may
6095 // not be beneficial for small loops if none of the lanes in the second vector
6096 // iteration is enabled. However, for larger loops, there is likely to be a
6097 // similar benefit as for fixed-width vectors. For now, we choose to leave
6098 // the InterleaveCount as if vscale is '1', although if some information about
6099 // the vector is known (e.g. min vector size), we can make a better decision.
6100 if (BestKnownTC) {
6101 MaxInterleaveCount =
6102 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6103 // Make sure MaxInterleaveCount is greater than 0.
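// For example, a best known trip count of 3 with VF = 4 gives 3 / 4 == 0
// above, which the clamp below raises back to 1.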
6104 MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6105 }
6106
6107 assert(MaxInterleaveCount > 0 &&
6108 "Maximum interleave count must be greater than 0");
6109
6110 // Clamp the calculated IC to be between 1 and the max interleave count
6111 // that the target and trip count allow.
6112 if (IC > MaxInterleaveCount)
6113 IC = MaxInterleaveCount;
6114 else
6115 // Make sure IC is greater than 0.
6116 IC = std::max(1u, IC);
6117
6118 assert(IC > 0 && "Interleave count must be greater than 0.");
6119
6120 // If we did not calculate the cost for VF (because the user selected the VF)
6121 // then we calculate the cost of VF here.
6122 if (LoopCost == 0)
6123 LoopCost = expectedCost(VF).first;
6124
6125 assert(LoopCost && "Non-zero loop cost expected");
6126
6127 // Interleave if we vectorized this loop and there is a reduction that could
6128 // benefit from interleaving.
6129 if (VF.isVector() && HasReductions) {
6130 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6131 return IC;
6132 }
6133
6134 // Note that if we've already vectorized the loop we will have done the
6135 // runtime check and so interleaving won't require further checks.
6136 bool InterleavingRequiresRuntimePointerCheck =
6137 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6138
6139 // We want to interleave small loops in order to reduce the loop overhead and
6140 // potentially expose ILP opportunities.
6141 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6142 << "LV: IC is " << IC << '\n'
6143 << "LV: VF is " << VF << '\n');
6144 const bool AggressivelyInterleaveReductions =
6145 TTI.enableAggressiveInterleaving(HasReductions);
6146 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6147 // We assume that the loop overhead cost is 1; we then use the cost model
6148 // to estimate the cost of the loop body and interleave until the loop
6149 // overhead is about 5% of the total cost.
6150 unsigned SmallIC =
6151 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6152
6153 // Interleave until store/load ports (estimated by max interleave count) are
6154 // saturated.
6155 unsigned NumStores = Legal->getNumStores();
6156 unsigned NumLoads = Legal->getNumLoads();
6157 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6158 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6159
6160 // If we have a scalar reduction (vector reductions are already dealt with
6161 // by this point), we can increase the critical path length if the loop
6162 // we're interleaving is inside another loop. Limit, by default, to 2 so
6163 // the critical path only gets increased by one reduction operation.
6164 if (HasReductions && TheLoop->getLoopDepth() > 1) {
6165 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6166 SmallIC = std::min(SmallIC, F);
6167 StoresIC = std::min(StoresIC, F);
6168 LoadsIC = std::min(LoadsIC, F);
6169 }
6170
6171 if (EnableLoadStoreRuntimeInterleave &&
6172 std::max(StoresIC, LoadsIC) > SmallIC) {
6173 LLVM_DEBUG(
6174 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6175 return std::max(StoresIC, LoadsIC);
6176 }
6177
6178 // If there are scalar reductions and TTI has enabled aggressive
6179 // interleaving for reductions, we will interleave to expose ILP.
6180 if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6181 AggressivelyInterleaveReductions) {
6182 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6183 // Interleave no less than SmallIC but not as aggressively as the normal IC
6184 // to satisfy the rare situation when resources are too limited.
6185 return std::max(IC / 2, SmallIC);
6186 } else {
6187 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6188 return SmallIC;
6189 }
6190 }
6191
6192 // Interleave if this is a large loop (small loops are already dealt with by
6193 // this point) that could benefit from interleaving.
6194 if (AggressivelyInterleaveReductions) {
6195 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6196 return IC;
6197 }
6198
6199 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6200 return 1;
6201 }
6202
6203 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6204 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6205 // This function calculates the register usage by measuring the highest number
6206 // of values that are alive at a single location. Obviously, this is a very
6207 // rough estimation. We scan the loop in topological order and
6208 // assign a number to each instruction. We use RPO to ensure that defs are
6209 // met before their users. We assume that each instruction that has in-loop
6210 // users starts an interval. We record every time that an in-loop value is
6211 // used, so we have a list of the first and last occurrences of each
6212 // instruction. Next, we transpose this data structure into a multi-map that
6213 // holds the list of intervals that *end* at a specific location. This multi-
6214 // map allows us to perform a linear search. We scan the instructions linearly
6215 // and record each time that a new interval starts, by placing it in a set.
6216 // If we find this value in the multi-map then we remove it from the set.
6217 // The max register usage is the maximum size of the set.
6218 // We also search for instructions that are defined outside the loop, but are
6219 // used inside the loop. We need this number separately from the max-interval
6220 // usage number because when we unroll, loop-invariant values do not take
6221 // more registers.
6222 LoopBlocksDFS DFS(TheLoop);
6223 DFS.perform(LI);
6224
6225 RegisterUsage RU;
6226
6227 // Each 'key' in the map opens a new interval. The values
6228 // of the map are the index of the 'last seen' usage of the
6229 // instruction that is the key.
6230 using IntervalMap = DenseMap<Instruction *, unsigned>;
6231
6232 // Maps instruction to its index.
6233 SmallVector<Instruction *, 64> IdxToInstr;
6234 // Marks the end of each interval.
6235 IntervalMap EndPoint;
6236 // Saves the list of instruction indices that are used in the loop.
6237 SmallPtrSet<Instruction *, 8> Ends;
6238 // Saves the list of values that are used in the loop but are
6239 // defined outside the loop, such as arguments and constants.
6240 SmallPtrSet<Value *, 8> LoopInvariants;
6241
6242 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6243 for (Instruction &I : BB->instructionsWithoutDebug()) {
6244 IdxToInstr.push_back(&I);
6245
6246 // Save the end location of each USE.
6247 for (Value *U : I.operands()) {
6248 auto *Instr = dyn_cast<Instruction>(U);
6249
6250 // Ignore non-instruction values such as arguments, constants, etc.
6251 if (!Instr)
6252 continue;
6253
6254 // If this instruction is outside the loop then record it and continue.
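// Such loop-invariant values (e.g. an address or operand computed before
// the loop) occupy a register for the whole loop regardless of the
// interleave count, which is why they are tracked separately from the
// open intervals.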
6255 if (!TheLoop->contains(Instr)) { 6256 LoopInvariants.insert(Instr); 6257 continue; 6258 } 6259 6260 // Overwrite previous end points. 6261 EndPoint[Instr] = IdxToInstr.size(); 6262 Ends.insert(Instr); 6263 } 6264 } 6265 } 6266 6267 // Saves the list of intervals that end with the index in 'key'. 6268 using InstrList = SmallVector<Instruction *, 2>; 6269 DenseMap<unsigned, InstrList> TransposeEnds; 6270 6271 // Transpose the EndPoints to a list of values that end at each index. 6272 for (auto &Interval : EndPoint) 6273 TransposeEnds[Interval.second].push_back(Interval.first); 6274 6275 SmallPtrSet<Instruction *, 8> OpenIntervals; 6276 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6277 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6278 6279 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6280 6281 // A lambda that gets the register usage for the given type and VF. 6282 const auto &TTICapture = TTI; 6283 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) { 6284 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6285 return 0U; 6286 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF)); 6287 }; 6288 6289 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6290 Instruction *I = IdxToInstr[i]; 6291 6292 // Remove all of the instructions that end at this location. 6293 InstrList &List = TransposeEnds[i]; 6294 for (Instruction *ToRemove : List) 6295 OpenIntervals.erase(ToRemove); 6296 6297 // Ignore instructions that are never used within the loop. 6298 if (!Ends.count(I)) 6299 continue; 6300 6301 // Skip ignored values. 6302 if (ValuesToIgnore.count(I)) 6303 continue; 6304 6305 // For each VF find the maximum usage of registers. 6306 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6307 // Count the number of live intervals. 6308 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6309 6310 if (VFs[j].isScalar()) { 6311 for (auto Inst : OpenIntervals) { 6312 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6313 if (RegUsage.find(ClassID) == RegUsage.end()) 6314 RegUsage[ClassID] = 1; 6315 else 6316 RegUsage[ClassID] += 1; 6317 } 6318 } else { 6319 collectUniformsAndScalars(VFs[j]); 6320 for (auto Inst : OpenIntervals) { 6321 // Skip ignored values for VF > 1. 6322 if (VecValuesToIgnore.count(Inst)) 6323 continue; 6324 if (isScalarAfterVectorization(Inst, VFs[j])) { 6325 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6326 if (RegUsage.find(ClassID) == RegUsage.end()) 6327 RegUsage[ClassID] = 1; 6328 else 6329 RegUsage[ClassID] += 1; 6330 } else { 6331 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6332 if (RegUsage.find(ClassID) == RegUsage.end()) 6333 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6334 else 6335 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6336 } 6337 } 6338 } 6339 6340 for (auto& pair : RegUsage) { 6341 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6342 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6343 else 6344 MaxUsages[j][pair.first] = pair.second; 6345 } 6346 } 6347 6348 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6349 << OpenIntervals.size() << '\n'); 6350 6351 // Add the current instruction to the list of open intervals. 
6352 OpenIntervals.insert(I);
6353 }
6354
6355 for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6356 SmallMapVector<unsigned, unsigned, 4> Invariant;
6357
6358 for (auto Inst : LoopInvariants) {
6359 unsigned Usage =
6360 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6361 unsigned ClassID =
6362 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6363 if (Invariant.find(ClassID) == Invariant.end())
6364 Invariant[ClassID] = Usage;
6365 else
6366 Invariant[ClassID] += Usage;
6367 }
6368
6369 LLVM_DEBUG({
6370 dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6371 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6372 << " item\n";
6373 for (const auto &pair : MaxUsages[i]) {
6374 dbgs() << "LV(REG): RegisterClass: "
6375 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6376 << " registers\n";
6377 }
6378 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6379 << " item\n";
6380 for (const auto &pair : Invariant) {
6381 dbgs() << "LV(REG): RegisterClass: "
6382 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6383 << " registers\n";
6384 }
6385 });
6386
6387 RU.LoopInvariantRegs = Invariant;
6388 RU.MaxLocalUsers = MaxUsages[i];
6389 RUs[i] = RU;
6390 }
6391
6392 return RUs;
6393 }
6394
6395 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6396 // TODO: Cost model for emulated masked load/store is completely
6397 // broken. This hack guides the cost model to use an artificially
6398 // high enough value to practically disable vectorization with such
6399 // operations, except where the previously deployed legality hack allowed
6400 // using very low cost values. This is to avoid regressions coming simply
6401 // from moving the "masked load/store" check from legality to the cost model.
6402 // Masked Load/Gather emulation was previously never allowed.
6403 // Only a limited amount of Masked Store/Scatter emulation was allowed.
6404 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6405 return isa<LoadInst>(I) ||
6406 (isa<StoreInst>(I) &&
6407 NumPredStores > NumberOfStoresToPredicate);
6408 }
6409
6410 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6411 // If we aren't vectorizing the loop, or if we've already collected the
6412 // instructions to scalarize, there's nothing to do. Collection may already
6413 // have occurred if we have a user-selected VF and are now computing the
6414 // expected cost for interleaving.
6415 if (VF.isScalar() || VF.isZero() ||
6416 InstsToScalarize.find(VF) != InstsToScalarize.end())
6417 return;
6418
6419 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6420 // not profitable to scalarize any instructions, the presence of VF in the
6421 // map will indicate that we've analyzed it already.
6422 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6423
6424 // Find all the instructions that are scalar with predication in the loop and
6425 // determine if it would be better not to if-convert the blocks they are in.
6426 // If so, we also record the instructions to scalarize.
6427 for (BasicBlock *BB : TheLoop->blocks()) {
6428 if (!blockNeedsPredication(BB))
6429 continue;
6430 for (Instruction &I : *BB)
6431 if (isScalarWithPredication(&I)) {
6432 ScalarCostsTy ScalarCosts;
6433 // Do not apply discount logic if hacked cost is needed
6434 // for emulated masked memrefs.
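// (A non-negative result from computePredInstDiscount below means the
// scalarized form of the chain is no more expensive than the vector form,
// so its instructions are recorded for scalarization.)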
6435 if (!useEmulatedMaskMemRefHack(&I) && 6436 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6437 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6438 // Remember that BB will remain after vectorization. 6439 PredicatedBBsAfterVectorization.insert(BB); 6440 } 6441 } 6442 } 6443 6444 int LoopVectorizationCostModel::computePredInstDiscount( 6445 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 6446 ElementCount VF) { 6447 assert(!isUniformAfterVectorization(PredInst, VF) && 6448 "Instruction marked uniform-after-vectorization will be predicated"); 6449 6450 // Initialize the discount to zero, meaning that the scalar version and the 6451 // vector version cost the same. 6452 int Discount = 0; 6453 6454 // Holds instructions to analyze. The instructions we visit are mapped in 6455 // ScalarCosts. Those instructions are the ones that would be scalarized if 6456 // we find that the scalar version costs less. 6457 SmallVector<Instruction *, 8> Worklist; 6458 6459 // Returns true if the given instruction can be scalarized. 6460 auto canBeScalarized = [&](Instruction *I) -> bool { 6461 // We only attempt to scalarize instructions forming a single-use chain 6462 // from the original predicated block that would otherwise be vectorized. 6463 // Although not strictly necessary, we give up on instructions we know will 6464 // already be scalar to avoid traversing chains that are unlikely to be 6465 // beneficial. 6466 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6467 isScalarAfterVectorization(I, VF)) 6468 return false; 6469 6470 // If the instruction is scalar with predication, it will be analyzed 6471 // separately. We ignore it within the context of PredInst. 6472 if (isScalarWithPredication(I)) 6473 return false; 6474 6475 // If any of the instruction's operands are uniform after vectorization, 6476 // the instruction cannot be scalarized. This prevents, for example, a 6477 // masked load from being scalarized. 6478 // 6479 // We assume we will only emit a value for lane zero of an instruction 6480 // marked uniform after vectorization, rather than VF identical values. 6481 // Thus, if we scalarize an instruction that uses a uniform, we would 6482 // create uses of values corresponding to the lanes we aren't emitting code 6483 // for. This behavior can be changed by allowing getScalarValue to clone 6484 // the lane zero values for uniforms rather than asserting. 6485 for (Use &U : I->operands()) 6486 if (auto *J = dyn_cast<Instruction>(U.get())) 6487 if (isUniformAfterVectorization(J, VF)) 6488 return false; 6489 6490 // Otherwise, we can scalarize the instruction. 6491 return true; 6492 }; 6493 6494 // Compute the expected cost discount from scalarizing the entire expression 6495 // feeding the predicated instruction. We currently only consider expressions 6496 // that are single-use instruction chains. 6497 Worklist.push_back(PredInst); 6498 while (!Worklist.empty()) { 6499 Instruction *I = Worklist.pop_back_val(); 6500 6501 // If we've already analyzed the instruction, there's nothing to do. 6502 if (ScalarCosts.find(I) != ScalarCosts.end()) 6503 continue; 6504 6505 // Compute the cost of the vector instruction. Note that this cost already 6506 // includes the scalarization overhead of the predicated instruction. 6507 unsigned VectorCost = getInstructionCost(I, VF).first; 6508 6509 // Compute the cost of the scalarized instruction. 
This cost is the cost of 6510 // the instruction as if it wasn't if-converted and instead remained in the 6511 // predicated block. We will scale this cost by block probability after 6512 // computing the scalarization overhead. 6513 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6514 unsigned ScalarCost = 6515 VF.getKnownMinValue() * 6516 getInstructionCost(I, ElementCount::getFixed(1)).first; 6517 6518 // Compute the scalarization overhead of needed insertelement instructions 6519 // and phi nodes. 6520 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6521 ScalarCost += TTI.getScalarizationOverhead( 6522 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6523 APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); 6524 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6525 ScalarCost += 6526 VF.getKnownMinValue() * 6527 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6528 } 6529 6530 // Compute the scalarization overhead of needed extractelement 6531 // instructions. For each of the instruction's operands, if the operand can 6532 // be scalarized, add it to the worklist; otherwise, account for the 6533 // overhead. 6534 for (Use &U : I->operands()) 6535 if (auto *J = dyn_cast<Instruction>(U.get())) { 6536 assert(VectorType::isValidElementType(J->getType()) && 6537 "Instruction has non-scalar type"); 6538 if (canBeScalarized(J)) 6539 Worklist.push_back(J); 6540 else if (needsExtract(J, VF)) { 6541 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6542 ScalarCost += TTI.getScalarizationOverhead( 6543 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6544 APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); 6545 } 6546 } 6547 6548 // Scale the total scalar cost by block probability. 6549 ScalarCost /= getReciprocalPredBlockProb(); 6550 6551 // Compute the discount. A non-negative discount means the vector version 6552 // of the instruction costs more, and scalarizing would be beneficial. 6553 Discount += VectorCost - ScalarCost; 6554 ScalarCosts[I] = ScalarCost; 6555 } 6556 6557 return Discount; 6558 } 6559 6560 LoopVectorizationCostModel::VectorizationCostTy 6561 LoopVectorizationCostModel::expectedCost(ElementCount VF) { 6562 VectorizationCostTy Cost; 6563 6564 // For each block. 6565 for (BasicBlock *BB : TheLoop->blocks()) { 6566 VectorizationCostTy BlockCost; 6567 6568 // For each instruction in the old loop. 6569 for (Instruction &I : BB->instructionsWithoutDebug()) { 6570 // Skip ignored values. 6571 if (ValuesToIgnore.count(&I) || 6572 (VF.isVector() && VecValuesToIgnore.count(&I))) 6573 continue; 6574 6575 VectorizationCostTy C = getInstructionCost(&I, VF); 6576 6577 // Check if we should override the cost. 6578 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6579 C.first = ForceTargetInstructionCost; 6580 6581 BlockCost.first += C.first; 6582 BlockCost.second |= C.second; 6583 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6584 << " for VF " << VF << " For instruction: " << I 6585 << '\n'); 6586 } 6587 6588 // If we are vectorizing a predicated block, it will have been 6589 // if-converted. This means that the block's instructions (aside from 6590 // stores and instructions that may divide by zero) will now be 6591 // unconditionally executed. For the scalar case, we may not always execute 6592 // the predicated block, if it is an if-else block. Thus, scale the block's 6593 // cost by the probability of executing it. 
blockNeedsPredication from
6594 // Legal is used so as not to include all blocks in tail-folded loops.
6595 if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6596 BlockCost.first /= getReciprocalPredBlockProb();
6597
6598 Cost.first += BlockCost.first;
6599 Cost.second |= BlockCost.second;
6600 }
6601
6602 return Cost;
6603 }
6604
6605 /// Gets the address access SCEV after verifying that the access pattern
6606 /// is loop invariant except for the induction variable dependence.
6607 ///
6608 /// This SCEV can be sent to the Target in order to estimate the address
6609 /// calculation cost.
6610 static const SCEV *getAddressAccessSCEV(
6611 Value *Ptr,
6612 LoopVectorizationLegality *Legal,
6613 PredicatedScalarEvolution &PSE,
6614 const Loop *TheLoop) {
6615
6616 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6617 if (!Gep)
6618 return nullptr;
6619
6620 // We are looking for a GEP with all loop-invariant indices except for one
6621 // which should be an induction variable.
6622 auto SE = PSE.getSE();
6623 unsigned NumOperands = Gep->getNumOperands();
6624 for (unsigned i = 1; i < NumOperands; ++i) {
6625 Value *Opd = Gep->getOperand(i);
6626 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6627 !Legal->isInductionVariable(Opd))
6628 return nullptr;
6629 }
6630
6631 // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6632 return PSE.getSCEV(Ptr);
6633 }
6634
6635 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6636 return Legal->hasStride(I->getOperand(0)) ||
6637 Legal->hasStride(I->getOperand(1));
6638 }
6639
6640 unsigned
6641 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6642 ElementCount VF) {
6643 assert(VF.isVector() &&
6644 "Scalarization cost of instruction implies vectorization.");
6645 assert(!VF.isScalable() && "scalable vectors not yet supported.");
6646 Type *ValTy = getMemInstValueType(I);
6647 auto SE = PSE.getSE();
6648
6649 unsigned AS = getLoadStoreAddressSpace(I);
6650 Value *Ptr = getLoadStorePointerOperand(I);
6651 Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6652
6653 // Figure out whether the access is strided and get the stride value
6654 // if it's known at compile time.
6655 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6656
6657 // Get the cost of the scalar memory instruction and address computation.
6658 unsigned Cost =
6659 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6660
6661 // Don't pass *I here, since it is scalar but will actually be part of a
6662 // vectorized loop where its user is a vectorized instruction.
6663 const Align Alignment = getLoadStoreAlignment(I);
6664 Cost += VF.getKnownMinValue() *
6665 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6666 AS, TTI::TCK_RecipThroughput);
6667
6668 // Get the overhead of the extractelement and insertelement instructions
6669 // we might create due to scalarization.
6670 Cost += getScalarizationOverhead(I, VF);
6671
6672 // If we have a predicated store, it may not be executed for each vector
6673 // lane. Scale the cost by the probability of executing the predicated
6674 // block.
6675 if (isPredicatedInst(I)) {
6676 Cost /= getReciprocalPredBlockProb();
6677
6678 if (useEmulatedMaskMemRefHack(I))
6679 // Artificially setting to a high enough value to practically disable
6680 // vectorization with such operations.
6681 Cost = 3000000; 6682 } 6683 6684 return Cost; 6685 } 6686 6687 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6688 ElementCount VF) { 6689 Type *ValTy = getMemInstValueType(I); 6690 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6691 Value *Ptr = getLoadStorePointerOperand(I); 6692 unsigned AS = getLoadStoreAddressSpace(I); 6693 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6694 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6695 6696 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6697 "Stride should be 1 or -1 for consecutive memory access"); 6698 const Align Alignment = getLoadStoreAlignment(I); 6699 unsigned Cost = 0; 6700 if (Legal->isMaskRequired(I)) 6701 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6702 CostKind); 6703 else 6704 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6705 CostKind, I); 6706 6707 bool Reverse = ConsecutiveStride < 0; 6708 if (Reverse) 6709 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6710 return Cost; 6711 } 6712 6713 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6714 ElementCount VF) { 6715 assert(Legal->isUniformMemOp(*I)); 6716 6717 Type *ValTy = getMemInstValueType(I); 6718 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6719 const Align Alignment = getLoadStoreAlignment(I); 6720 unsigned AS = getLoadStoreAddressSpace(I); 6721 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6722 if (isa<LoadInst>(I)) { 6723 return TTI.getAddressComputationCost(ValTy) + 6724 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6725 CostKind) + 6726 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6727 } 6728 StoreInst *SI = cast<StoreInst>(I); 6729 6730 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6731 return TTI.getAddressComputationCost(ValTy) + 6732 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6733 CostKind) + 6734 (isLoopInvariantStoreValue 6735 ? 0 6736 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6737 VF.getKnownMinValue() - 1)); 6738 } 6739 6740 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6741 ElementCount VF) { 6742 Type *ValTy = getMemInstValueType(I); 6743 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6744 const Align Alignment = getLoadStoreAlignment(I); 6745 const Value *Ptr = getLoadStorePointerOperand(I); 6746 6747 return TTI.getAddressComputationCost(VectorTy) + 6748 TTI.getGatherScatterOpCost( 6749 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6750 TargetTransformInfo::TCK_RecipThroughput, I); 6751 } 6752 6753 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6754 ElementCount VF) { 6755 Type *ValTy = getMemInstValueType(I); 6756 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6757 unsigned AS = getLoadStoreAddressSpace(I); 6758 6759 auto Group = getInterleavedAccessGroup(I); 6760 assert(Group && "Fail to get an interleaved access group."); 6761 6762 unsigned InterleaveFactor = Group->getFactor(); 6763 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6764 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6765 6766 // Holds the indices of existing members in an interleaved load group. 6767 // An interleaved store group doesn't need this as it doesn't allow gaps. 
6768 SmallVector<unsigned, 4> Indices; 6769 if (isa<LoadInst>(I)) { 6770 for (unsigned i = 0; i < InterleaveFactor; i++) 6771 if (Group->getMember(i)) 6772 Indices.push_back(i); 6773 } 6774 6775 // Calculate the cost of the whole interleaved group. 6776 bool UseMaskForGaps = 6777 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 6778 unsigned Cost = TTI.getInterleavedMemoryOpCost( 6779 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6780 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6781 6782 if (Group->isReverse()) { 6783 // TODO: Add support for reversed masked interleaved access. 6784 assert(!Legal->isMaskRequired(I) && 6785 "Reverse masked interleaved access not supported."); 6786 Cost += Group->getNumMembers() * 6787 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6788 } 6789 return Cost; 6790 } 6791 6792 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6793 ElementCount VF) { 6794 // Calculate scalar cost only. Vectorization cost should be ready at this 6795 // moment. 6796 if (VF.isScalar()) { 6797 Type *ValTy = getMemInstValueType(I); 6798 const Align Alignment = getLoadStoreAlignment(I); 6799 unsigned AS = getLoadStoreAddressSpace(I); 6800 6801 return TTI.getAddressComputationCost(ValTy) + 6802 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6803 TTI::TCK_RecipThroughput, I); 6804 } 6805 return getWideningCost(I, VF); 6806 } 6807 6808 LoopVectorizationCostModel::VectorizationCostTy 6809 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6810 ElementCount VF) { 6811 // If we know that this instruction will remain uniform, check the cost of 6812 // the scalar version. 6813 if (isUniformAfterVectorization(I, VF)) 6814 VF = ElementCount::getFixed(1); 6815 6816 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6817 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6818 6819 // Forced scalars do not have any scalarization overhead. 6820 auto ForcedScalar = ForcedScalars.find(VF); 6821 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6822 auto InstSet = ForcedScalar->second; 6823 if (InstSet.count(I)) 6824 return VectorizationCostTy( 6825 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6826 VF.getKnownMinValue()), 6827 false); 6828 } 6829 6830 Type *VectorTy; 6831 unsigned C = getInstructionCost(I, VF, VectorTy); 6832 6833 bool TypeNotScalarized = 6834 VF.isVector() && VectorTy->isVectorTy() && 6835 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 6836 return VectorizationCostTy(C, TypeNotScalarized); 6837 } 6838 6839 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6840 ElementCount VF) { 6841 6842 assert(!VF.isScalable() && 6843 "cannot compute scalarization overhead for scalable vectorization"); 6844 if (VF.isScalar()) 6845 return 0; 6846 6847 unsigned Cost = 0; 6848 Type *RetTy = ToVectorTy(I->getType(), VF); 6849 if (!RetTy->isVoidTy() && 6850 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6851 Cost += TTI.getScalarizationOverhead( 6852 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 6853 true, false); 6854 6855 // Some targets keep addresses scalar. 6856 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6857 return Cost; 6858 6859 // Some targets support efficient element stores. 6860 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6861 return Cost; 6862 6863 // Collect operands to consider. 
6864 CallInst *CI = dyn_cast<CallInst>(I); 6865 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 6866 6867 // Skip operands that do not require extraction/scalarization and do not incur 6868 // any overhead. 6869 return Cost + TTI.getOperandsScalarizationOverhead( 6870 filterExtractingOperands(Ops, VF), VF.getKnownMinValue()); 6871 } 6872 6873 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6874 if (VF.isScalar()) 6875 return; 6876 NumPredStores = 0; 6877 for (BasicBlock *BB : TheLoop->blocks()) { 6878 // For each instruction in the old loop. 6879 for (Instruction &I : *BB) { 6880 Value *Ptr = getLoadStorePointerOperand(&I); 6881 if (!Ptr) 6882 continue; 6883 6884 // TODO: We should generate better code and update the cost model for 6885 // predicated uniform stores. Today they are treated as any other 6886 // predicated store (see added test cases in 6887 // invariant-store-vectorization.ll). 6888 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6889 NumPredStores++; 6890 6891 if (Legal->isUniformMemOp(I)) { 6892 // TODO: Avoid replicating loads and stores instead of 6893 // relying on instcombine to remove them. 6894 // Load: Scalar load + broadcast 6895 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6896 unsigned Cost = getUniformMemOpCost(&I, VF); 6897 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6898 continue; 6899 } 6900 6901 // We assume that widening is the best solution when possible. 6902 if (memoryInstructionCanBeWidened(&I, VF)) { 6903 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 6904 int ConsecutiveStride = 6905 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 6906 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6907 "Expected consecutive stride."); 6908 InstWidening Decision = 6909 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6910 setWideningDecision(&I, VF, Decision, Cost); 6911 continue; 6912 } 6913 6914 // Choose between Interleaving, Gather/Scatter or Scalarization. 6915 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 6916 unsigned NumAccesses = 1; 6917 if (isAccessInterleaved(&I)) { 6918 auto Group = getInterleavedAccessGroup(&I); 6919 assert(Group && "Fail to get an interleaved access group."); 6920 6921 // Make one decision for the whole group. 6922 if (getWideningDecision(&I, VF) != CM_Unknown) 6923 continue; 6924 6925 NumAccesses = Group->getNumMembers(); 6926 if (interleavedAccessCanBeWidened(&I, VF)) 6927 InterleaveCost = getInterleaveGroupCost(&I, VF); 6928 } 6929 6930 unsigned GatherScatterCost = 6931 isLegalGatherOrScatter(&I) 6932 ? getGatherScatterCost(&I, VF) * NumAccesses 6933 : std::numeric_limits<unsigned>::max(); 6934 6935 unsigned ScalarizationCost = 6936 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6937 6938 // Choose better solution for the current VF, 6939 // write down this decision and use it during vectorization. 6940 unsigned Cost; 6941 InstWidening Decision; 6942 if (InterleaveCost <= GatherScatterCost && 6943 InterleaveCost < ScalarizationCost) { 6944 Decision = CM_Interleave; 6945 Cost = InterleaveCost; 6946 } else if (GatherScatterCost < ScalarizationCost) { 6947 Decision = CM_GatherScatter; 6948 Cost = GatherScatterCost; 6949 } else { 6950 Decision = CM_Scalarize; 6951 Cost = ScalarizationCost; 6952 } 6953 // If the instructions belongs to an interleave group, the whole group 6954 // receives the same decision. 
The whole group receives the cost, but 6955 // the cost will actually be assigned to one instruction. 6956 if (auto Group = getInterleavedAccessGroup(&I)) 6957 setWideningDecision(Group, VF, Decision, Cost); 6958 else 6959 setWideningDecision(&I, VF, Decision, Cost); 6960 } 6961 } 6962 6963 // Make sure that any load of address and any other address computation 6964 // remains scalar unless there is gather/scatter support. This avoids 6965 // inevitable extracts into address registers, and also has the benefit of 6966 // activating LSR more, since that pass can't optimize vectorized 6967 // addresses. 6968 if (TTI.prefersVectorizedAddressing()) 6969 return; 6970 6971 // Start with all scalar pointer uses. 6972 SmallPtrSet<Instruction *, 8> AddrDefs; 6973 for (BasicBlock *BB : TheLoop->blocks()) 6974 for (Instruction &I : *BB) { 6975 Instruction *PtrDef = 6976 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6977 if (PtrDef && TheLoop->contains(PtrDef) && 6978 getWideningDecision(&I, VF) != CM_GatherScatter) 6979 AddrDefs.insert(PtrDef); 6980 } 6981 6982 // Add all instructions used to generate the addresses. 6983 SmallVector<Instruction *, 4> Worklist; 6984 for (auto *I : AddrDefs) 6985 Worklist.push_back(I); 6986 while (!Worklist.empty()) { 6987 Instruction *I = Worklist.pop_back_val(); 6988 for (auto &Op : I->operands()) 6989 if (auto *InstOp = dyn_cast<Instruction>(Op)) 6990 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 6991 AddrDefs.insert(InstOp).second) 6992 Worklist.push_back(InstOp); 6993 } 6994 6995 for (auto *I : AddrDefs) { 6996 if (isa<LoadInst>(I)) { 6997 // Setting the desired widening decision should ideally be handled in 6998 // by cost functions, but since this involves the task of finding out 6999 // if the loaded register is involved in an address computation, it is 7000 // instead changed here when we know this is the case. 7001 InstWidening Decision = getWideningDecision(I, VF); 7002 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7003 // Scalarize a widened load of address. 7004 setWideningDecision( 7005 I, VF, CM_Scalarize, 7006 (VF.getKnownMinValue() * 7007 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7008 else if (auto Group = getInterleavedAccessGroup(I)) { 7009 // Scalarize an interleave group of address loads. 7010 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7011 if (Instruction *Member = Group->getMember(I)) 7012 setWideningDecision( 7013 Member, VF, CM_Scalarize, 7014 (VF.getKnownMinValue() * 7015 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7016 } 7017 } 7018 } else 7019 // Make sure I gets scalarized and a cost estimate without 7020 // scalarization overhead. 7021 ForcedScalars[VF].insert(I); 7022 } 7023 } 7024 7025 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7026 ElementCount VF, 7027 Type *&VectorTy) { 7028 Type *RetTy = I->getType(); 7029 if (canTruncateToMinimalBitwidth(I, VF)) 7030 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7031 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7032 auto SE = PSE.getSE(); 7033 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7034 7035 // TODO: We need to estimate the cost of intrinsic calls. 7036 switch (I->getOpcode()) { 7037 case Instruction::GetElementPtr: 7038 // We mark this instruction as zero-cost because the cost of GEPs in 7039 // vectorized code depends on whether the corresponding memory instruction 7040 // is scalarized or not. 
Therefore, we handle GEPs with the memory 7041 // instruction cost. 7042 return 0; 7043 case Instruction::Br: { 7044 // In cases of scalarized and predicated instructions, there will be VF 7045 // predicated blocks in the vectorized loop. Each branch around these 7046 // blocks requires also an extract of its vector compare i1 element. 7047 bool ScalarPredicatedBB = false; 7048 BranchInst *BI = cast<BranchInst>(I); 7049 if (VF.isVector() && BI->isConditional() && 7050 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7051 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7052 ScalarPredicatedBB = true; 7053 7054 if (ScalarPredicatedBB) { 7055 // Return cost for branches around scalarized and predicated blocks. 7056 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7057 auto *Vec_i1Ty = 7058 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7059 return (TTI.getScalarizationOverhead( 7060 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7061 false, true) + 7062 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7063 VF.getKnownMinValue())); 7064 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7065 // The back-edge branch will remain, as will all scalar branches. 7066 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7067 else 7068 // This branch will be eliminated by if-conversion. 7069 return 0; 7070 // Note: We currently assume zero cost for an unconditional branch inside 7071 // a predicated block since it will become a fall-through, although we 7072 // may decide in the future to call TTI for all branches. 7073 } 7074 case Instruction::PHI: { 7075 auto *Phi = cast<PHINode>(I); 7076 7077 // First-order recurrences are replaced by vector shuffles inside the loop. 7078 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7079 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7080 return TTI.getShuffleCost( 7081 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7082 VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7083 7084 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7085 // converted into select instructions. We require N - 1 selects per phi 7086 // node, where N is the number of incoming values. 7087 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7088 return (Phi->getNumIncomingValues() - 1) * 7089 TTI.getCmpSelInstrCost( 7090 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7091 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7092 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7093 7094 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7095 } 7096 case Instruction::UDiv: 7097 case Instruction::SDiv: 7098 case Instruction::URem: 7099 case Instruction::SRem: 7100 // If we have a predicated instruction, it may not be executed for each 7101 // vector lane. Get the scalarization cost and scale this amount by the 7102 // probability of executing the predicated block. If the instruction is not 7103 // predicated, we fall through to the next case. 7104 if (VF.isVector() && isScalarWithPredication(I)) { 7105 unsigned Cost = 0; 7106 7107 // These instructions have a non-void type, so account for the phi nodes 7108 // that we will create. This cost is likely to be zero. The phi node 7109 // cost, if any, should be scaled by the block probability because it 7110 // models a copy at the end of each predicated block. 
7111 Cost += VF.getKnownMinValue() * 7112 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7113 7114 // The cost of the non-predicated instruction. 7115 Cost += VF.getKnownMinValue() * 7116 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7117 7118 // The cost of insertelement and extractelement instructions needed for 7119 // scalarization. 7120 Cost += getScalarizationOverhead(I, VF); 7121 7122 // Scale the cost by the probability of executing the predicated blocks. 7123 // This assumes the predicated block for each vector lane is equally 7124 // likely. 7125 return Cost / getReciprocalPredBlockProb(); 7126 } 7127 LLVM_FALLTHROUGH; 7128 case Instruction::Add: 7129 case Instruction::FAdd: 7130 case Instruction::Sub: 7131 case Instruction::FSub: 7132 case Instruction::Mul: 7133 case Instruction::FMul: 7134 case Instruction::FDiv: 7135 case Instruction::FRem: 7136 case Instruction::Shl: 7137 case Instruction::LShr: 7138 case Instruction::AShr: 7139 case Instruction::And: 7140 case Instruction::Or: 7141 case Instruction::Xor: { 7142 // Since we will replace the stride by 1 the multiplication should go away. 7143 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7144 return 0; 7145 // Certain instructions can be cheaper to vectorize if they have a constant 7146 // second vector operand. One example of this are shifts on x86. 7147 Value *Op2 = I->getOperand(1); 7148 TargetTransformInfo::OperandValueProperties Op2VP; 7149 TargetTransformInfo::OperandValueKind Op2VK = 7150 TTI.getOperandInfo(Op2, Op2VP); 7151 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7152 Op2VK = TargetTransformInfo::OK_UniformValue; 7153 7154 SmallVector<const Value *, 4> Operands(I->operand_values()); 7155 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7156 return N * TTI.getArithmeticInstrCost( 7157 I->getOpcode(), VectorTy, CostKind, 7158 TargetTransformInfo::OK_AnyValue, 7159 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7160 } 7161 case Instruction::FNeg: { 7162 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7163 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF.getKnownMinValue() : 1; 7164 return N * TTI.getArithmeticInstrCost( 7165 I->getOpcode(), VectorTy, CostKind, 7166 TargetTransformInfo::OK_AnyValue, 7167 TargetTransformInfo::OK_AnyValue, 7168 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 7169 I->getOperand(0), I); 7170 } 7171 case Instruction::Select: { 7172 SelectInst *SI = cast<SelectInst>(I); 7173 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7174 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7175 Type *CondTy = SI->getCondition()->getType(); 7176 if (!ScalarCond) { 7177 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7178 CondTy = VectorType::get(CondTy, VF); 7179 } 7180 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7181 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7182 } 7183 case Instruction::ICmp: 7184 case Instruction::FCmp: { 7185 Type *ValTy = I->getOperand(0)->getType(); 7186 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7187 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7188 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7189 VectorTy = ToVectorTy(ValTy, VF); 7190 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7191 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7192 } 7193 case Instruction::Store: 7194 case Instruction::Load: { 7195 ElementCount Width = VF; 7196 if (Width.isVector()) { 7197 InstWidening Decision = getWideningDecision(I, Width); 7198 assert(Decision != CM_Unknown && 7199 "CM decision should be taken at this point"); 7200 if (Decision == CM_Scalarize) 7201 Width = ElementCount::getFixed(1); 7202 } 7203 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7204 return getMemoryInstructionCost(I, VF); 7205 } 7206 case Instruction::ZExt: 7207 case Instruction::SExt: 7208 case Instruction::FPToUI: 7209 case Instruction::FPToSI: 7210 case Instruction::FPExt: 7211 case Instruction::PtrToInt: 7212 case Instruction::IntToPtr: 7213 case Instruction::SIToFP: 7214 case Instruction::UIToFP: 7215 case Instruction::Trunc: 7216 case Instruction::FPTrunc: 7217 case Instruction::BitCast: { 7218 // Computes the CastContextHint from a Load/Store instruction. 7219 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7220 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7221 "Expected a load or a store!"); 7222 7223 if (VF.isScalar() || !TheLoop->contains(I)) 7224 return TTI::CastContextHint::Normal; 7225 7226 switch (getWideningDecision(I, VF)) { 7227 case LoopVectorizationCostModel::CM_GatherScatter: 7228 return TTI::CastContextHint::GatherScatter; 7229 case LoopVectorizationCostModel::CM_Interleave: 7230 return TTI::CastContextHint::Interleave; 7231 case LoopVectorizationCostModel::CM_Scalarize: 7232 case LoopVectorizationCostModel::CM_Widen: 7233 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7234 : TTI::CastContextHint::Normal; 7235 case LoopVectorizationCostModel::CM_Widen_Reverse: 7236 return TTI::CastContextHint::Reversed; 7237 case LoopVectorizationCostModel::CM_Unknown: 7238 llvm_unreachable("Instr did not go through cost modelling?"); 7239 } 7240 7241 llvm_unreachable("Unhandled case!"); 7242 }; 7243 7244 unsigned Opcode = I->getOpcode(); 7245 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7246 // For Trunc, the context is the only user, which must be a StoreInst. 
7247 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7248 if (I->hasOneUse()) 7249 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7250 CCH = ComputeCCH(Store); 7251 } 7252 // For Z/Sext, the context is the operand, which must be a LoadInst. 7253 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7254 Opcode == Instruction::FPExt) { 7255 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7256 CCH = ComputeCCH(Load); 7257 } 7258 7259 // We optimize the truncation of induction variables having constant 7260 // integer steps. The cost of these truncations is the same as the scalar 7261 // operation. 7262 if (isOptimizableIVTruncate(I, VF)) { 7263 auto *Trunc = cast<TruncInst>(I); 7264 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7265 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7266 } 7267 7268 Type *SrcScalarTy = I->getOperand(0)->getType(); 7269 Type *SrcVecTy = 7270 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7271 if (canTruncateToMinimalBitwidth(I, VF)) { 7272 // This cast is going to be shrunk. This may remove the cast or it might 7273 // turn it into slightly different cast. For example, if MinBW == 16, 7274 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7275 // 7276 // Calculate the modified src and dest types. 7277 Type *MinVecTy = VectorTy; 7278 if (Opcode == Instruction::Trunc) { 7279 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7280 VectorTy = 7281 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7282 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7283 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7284 VectorTy = 7285 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7286 } 7287 } 7288 7289 assert(!VF.isScalable() && "VF is assumed to be non scalable"); 7290 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7291 return N * 7292 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7293 } 7294 case Instruction::Call: { 7295 bool NeedToScalarize; 7296 CallInst *CI = cast<CallInst>(I); 7297 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7298 if (getVectorIntrinsicIDForCall(CI, TLI)) 7299 return std::min(CallCost, getVectorIntrinsicCost(CI, VF)); 7300 return CallCost; 7301 } 7302 case Instruction::ExtractValue: { 7303 InstructionCost ExtractCost = 7304 TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7305 assert(ExtractCost.isValid() && "Invalid cost for ExtractValue"); 7306 return *(ExtractCost.getValue()); 7307 } 7308 default: 7309 // The cost of executing VF copies of the scalar instruction. This opcode 7310 // is unknown. Assume that it is the same as 'mul'. 7311 return VF.getKnownMinValue() * TTI.getArithmeticInstrCost( 7312 Instruction::Mul, VectorTy, CostKind) + 7313 getScalarizationOverhead(I, VF); 7314 } // end of switch. 
7315 } 7316 7317 char LoopVectorize::ID = 0; 7318 7319 static const char lv_name[] = "Loop Vectorization"; 7320 7321 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7322 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7323 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7324 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7325 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7326 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7327 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7328 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7329 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7330 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7331 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7332 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7333 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7334 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7335 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7336 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7337 7338 namespace llvm { 7339 7340 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7341 7342 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7343 bool VectorizeOnlyWhenForced) { 7344 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7345 } 7346 7347 } // end namespace llvm 7348 7349 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7350 // Check if the pointer operand of a load or store instruction is 7351 // consecutive. 7352 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7353 return Legal->isConsecutivePtr(Ptr); 7354 return false; 7355 } 7356 7357 void LoopVectorizationCostModel::collectValuesToIgnore() { 7358 // Ignore ephemeral values. 7359 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7360 7361 // Ignore type-promoting instructions we identified during reduction 7362 // detection. 7363 for (auto &Reduction : Legal->getReductionVars()) { 7364 RecurrenceDescriptor &RedDes = Reduction.second; 7365 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7366 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7367 } 7368 // Ignore type-casting instructions we identified during induction 7369 // detection. 7370 for (auto &Induction : Legal->getInductionVars()) { 7371 InductionDescriptor &IndDes = Induction.second; 7372 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7373 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7374 } 7375 } 7376 7377 void LoopVectorizationCostModel::collectInLoopReductions() { 7378 for (auto &Reduction : Legal->getReductionVars()) { 7379 PHINode *Phi = Reduction.first; 7380 RecurrenceDescriptor &RdxDesc = Reduction.second; 7381 7382 // We don't collect reductions that are type promoted (yet). 7383 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7384 continue; 7385 7386 // If the target would prefer this reduction to happen "in-loop", then we 7387 // want to record it as such. 7388 unsigned Opcode = RdxDesc.getOpcode(); 7389 if (!PreferInLoopReductions && 7390 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7391 TargetTransformInfo::ReductionFlags())) 7392 continue; 7393 7394 // Check that we can correctly put the reductions into the loop, by 7395 // finding the chain of operations that leads from the phi to the loop 7396 // exit value. 
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop)
      InLoopReductionChains[Phi] = ReductionOperations;
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: They may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(
          determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
    return None;

  // Invalidate interleave groups if all blocks of loop will be predicated.
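  // The loop header only needs predication when the tail is folded by
  // masking, so the check below effectively tests for tail folding.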
7470 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 7471 !useMaskedInterleavedAccesses(*TTI)) { 7472 LLVM_DEBUG( 7473 dbgs() 7474 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7475 "which requires masked-interleaved support.\n"); 7476 if (CM.InterleaveInfo.invalidateGroups()) 7477 // Invalidating interleave groups also requires invalidating all decisions 7478 // based on them, which includes widening decisions and uniform and scalar 7479 // values. 7480 CM.invalidateCostModelingDecisions(); 7481 } 7482 7483 ElementCount MaxVF = MaybeMaxVF.getValue(); 7484 assert(MaxVF.isNonZero() && "MaxVF is zero."); 7485 7486 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF); 7487 if (!UserVF.isZero() && 7488 (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) { 7489 // FIXME: MaxVF is temporarily used inplace of UserVF for illegal scalable 7490 // VFs here, this should be reverted to only use legal UserVFs once the 7491 // loop below supports scalable VFs. 7492 ElementCount VF = UserVFIsLegal ? UserVF : MaxVF; 7493 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max") 7494 << " VF " << VF << ".\n"); 7495 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7496 "VF needs to be a power of two"); 7497 // Collect the instructions (and their associated costs) that will be more 7498 // profitable to scalarize. 7499 CM.selectUserVectorizationFactor(VF); 7500 CM.collectInLoopReductions(); 7501 buildVPlansWithVPRecipes(VF, VF); 7502 LLVM_DEBUG(printPlans(dbgs())); 7503 return {{VF, 0}}; 7504 } 7505 7506 assert(!MaxVF.isScalable() && 7507 "Scalable vectors not yet supported beyond this point"); 7508 7509 for (ElementCount VF = ElementCount::getFixed(1); 7510 ElementCount::isKnownLE(VF, MaxVF); VF *= 2) { 7511 // Collect Uniform and Scalar instructions after vectorization with VF. 7512 CM.collectUniformsAndScalars(VF); 7513 7514 // Collect the instructions (and their associated costs) that will be more 7515 // profitable to scalarize. 7516 if (VF.isVector()) 7517 CM.collectInstsToScalarize(VF); 7518 } 7519 7520 CM.collectInLoopReductions(); 7521 7522 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF); 7523 LLVM_DEBUG(printPlans(dbgs())); 7524 if (MaxVF.isScalar()) 7525 return VectorizationFactor::Disabled(); 7526 7527 // Select the optimal vectorization factor. 7528 return CM.selectVectorizationFactor(MaxVF); 7529 } 7530 7531 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 7532 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 7533 << '\n'); 7534 BestVF = VF; 7535 BestUF = UF; 7536 7537 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 7538 return !Plan->hasVF(VF); 7539 }); 7540 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 7541 } 7542 7543 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 7544 DominatorTree *DT) { 7545 // Perform the actual loop transformation. 7546 7547 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 
7548 VPCallbackILV CallbackILV(ILV); 7549 7550 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 7551 7552 VPTransformState State{*BestVF, BestUF, LI, 7553 DT, ILV.Builder, ILV.VectorLoopValueMap, 7554 &ILV, CallbackILV}; 7555 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7556 State.TripCount = ILV.getOrCreateTripCount(nullptr); 7557 State.CanonicalIV = ILV.Induction; 7558 7559 ILV.printDebugTracesAtStart(); 7560 7561 //===------------------------------------------------===// 7562 // 7563 // Notice: any optimization or new instruction that go 7564 // into the code below should also be implemented in 7565 // the cost-model. 7566 // 7567 //===------------------------------------------------===// 7568 7569 // 2. Copy and widen instructions from the old loop into the new loop. 7570 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7571 VPlans.front()->execute(&State); 7572 7573 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7574 // predication, updating analyses. 7575 ILV.fixVectorizedLoop(); 7576 7577 ILV.printDebugTracesAtEnd(); 7578 } 7579 7580 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7581 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7582 7583 // We create new control-flow for the vectorized loop, so the original exit 7584 // conditions will be dead after vectorization if it's only used by the 7585 // terminator 7586 SmallVector<BasicBlock*> ExitingBlocks; 7587 OrigLoop->getExitingBlocks(ExitingBlocks); 7588 for (auto *BB : ExitingBlocks) { 7589 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7590 if (!Cmp || !Cmp->hasOneUse()) 7591 continue; 7592 7593 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7594 if (!DeadInstructions.insert(Cmp).second) 7595 continue; 7596 7597 // The operands of the icmp is often a dead trunc, used by IndUpdate. 7598 // TODO: can recurse through operands in general 7599 for (Value *Op : Cmp->operands()) { 7600 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7601 DeadInstructions.insert(cast<Instruction>(Op)); 7602 } 7603 } 7604 7605 // We create new "steps" for induction variable updates to which the original 7606 // induction variables map. An original update instruction will be dead if 7607 // all its users except the induction variable are dead. 7608 auto *Latch = OrigLoop->getLoopLatch(); 7609 for (auto &Induction : Legal->getInductionVars()) { 7610 PHINode *Ind = Induction.first; 7611 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7612 7613 // If the tail is to be folded by masking, the primary induction variable, 7614 // if exists, isn't dead: it will be used for masking. Don't kill it. 7615 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7616 continue; 7617 7618 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7619 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7620 })) 7621 DeadInstructions.insert(IndUpdate); 7622 7623 // We record as "Dead" also the type-casting instructions we had identified 7624 // during induction analysis. We don't need any handling for them in the 7625 // vectorized loop because we have proven that, under a proper runtime 7626 // test guarding the vectorized loop, the value of the phi, and the casted 7627 // value of the phi, are the same. The last instruction in this casting chain 7628 // will get its scalar/vector/widened def from the scalar/vector/widened def 7629 // of the respective phi node. 
Any other casts in the induction def-use chain 7630 // have no other uses outside the phi update chain, and will be ignored. 7631 InductionDescriptor &IndDes = Induction.second; 7632 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7633 DeadInstructions.insert(Casts.begin(), Casts.end()); 7634 } 7635 } 7636 7637 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7638 7639 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7640 7641 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7642 Instruction::BinaryOps BinOp) { 7643 // When unrolling and the VF is 1, we only need to add a simple scalar. 7644 Type *Ty = Val->getType(); 7645 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7646 7647 if (Ty->isFloatingPointTy()) { 7648 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7649 7650 // Floating point operations had to be 'fast' to enable the unrolling. 7651 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7652 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7653 } 7654 Constant *C = ConstantInt::get(Ty, StartIdx); 7655 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7656 } 7657 7658 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7659 SmallVector<Metadata *, 4> MDs; 7660 // Reserve first location for self reference to the LoopID metadata node. 7661 MDs.push_back(nullptr); 7662 bool IsUnrollMetadata = false; 7663 MDNode *LoopID = L->getLoopID(); 7664 if (LoopID) { 7665 // First find existing loop unrolling disable metadata. 7666 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7667 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7668 if (MD) { 7669 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7670 IsUnrollMetadata = 7671 S && S->getString().startswith("llvm.loop.unroll.disable"); 7672 } 7673 MDs.push_back(LoopID->getOperand(i)); 7674 } 7675 } 7676 7677 if (!IsUnrollMetadata) { 7678 // Add runtime unroll disable metadata. 7679 LLVMContext &Context = L->getHeader()->getContext(); 7680 SmallVector<Metadata *, 1> DisableOperands; 7681 DisableOperands.push_back( 7682 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7683 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7684 MDs.push_back(DisableNode); 7685 MDNode *NewLoopID = MDNode::get(Context, MDs); 7686 // Set operand 0 to refer to the loop id itself. 7687 NewLoopID->replaceOperandWith(0, NewLoopID); 7688 L->setLoopID(NewLoopID); 7689 } 7690 } 7691 7692 //===--------------------------------------------------------------------===// 7693 // EpilogueVectorizerMainLoop 7694 //===--------------------------------------------------------------------===// 7695 7696 /// This function is partially responsible for generating the control flow 7697 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7698 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7699 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7700 Loop *Lp = createVectorLoopSkeleton(""); 7701 7702 // Generate the code to check the minimum iteration count of the vector 7703 // epilogue (see below). 7704 EPI.EpilogueIterationCountCheck = 7705 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 7706 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7707 7708 // Generate the code to check any assumptions that we've made for SCEV 7709 // expressions. 
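  // Note: a new, split-off vector preheader is only created when a SCEV check
  // is actually emitted, so the pointer comparison below detects whether a
  // SCEV check block was generated.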
7710 BasicBlock *SavedPreHeader = LoopVectorPreHeader; 7711 emitSCEVChecks(Lp, LoopScalarPreHeader); 7712 7713 // If a safety check was generated save it. 7714 if (SavedPreHeader != LoopVectorPreHeader) 7715 EPI.SCEVSafetyCheck = SavedPreHeader; 7716 7717 // Generate the code that checks at runtime if arrays overlap. We put the 7718 // checks into a separate block to make the more common case of few elements 7719 // faster. 7720 SavedPreHeader = LoopVectorPreHeader; 7721 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 7722 7723 // If a safety check was generated save/overwite it. 7724 if (SavedPreHeader != LoopVectorPreHeader) 7725 EPI.MemSafetyCheck = SavedPreHeader; 7726 7727 // Generate the iteration count check for the main loop, *after* the check 7728 // for the epilogue loop, so that the path-length is shorter for the case 7729 // that goes directly through the vector epilogue. The longer-path length for 7730 // the main loop is compensated for, by the gain from vectorizing the larger 7731 // trip count. Note: the branch will get updated later on when we vectorize 7732 // the epilogue. 7733 EPI.MainLoopIterationCountCheck = 7734 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 7735 7736 // Generate the induction variable. 7737 OldInduction = Legal->getPrimaryInduction(); 7738 Type *IdxTy = Legal->getWidestInductionType(); 7739 Value *StartIdx = ConstantInt::get(IdxTy, 0); 7740 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 7741 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 7742 EPI.VectorTripCount = CountRoundDown; 7743 Induction = 7744 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 7745 getDebugLocFromInstOrOperands(OldInduction)); 7746 7747 // Skip induction resume value creation here because they will be created in 7748 // the second pass. If we created them here, they wouldn't be used anyway, 7749 // because the vplan in the second pass still contains the inductions from the 7750 // original loop. 7751 7752 return completeLoopSkeleton(Lp, OrigLoopID); 7753 } 7754 7755 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7756 LLVM_DEBUG({ 7757 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7758 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 7759 << ", Main Loop UF:" << EPI.MainLoopUF 7760 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 7761 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7762 }); 7763 } 7764 7765 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7766 DEBUG_WITH_TYPE(VerboseDebug, { 7767 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 7768 }); 7769 } 7770 7771 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 7772 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 7773 assert(L && "Expected valid Loop."); 7774 assert(Bypass && "Expected valid bypass basic block."); 7775 unsigned VFactor = 7776 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 7777 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7778 Value *Count = getOrCreateTripCount(L); 7779 // Reuse existing vector loop preheader for TC checks. 7780 // Note that new preheader block is generated for vector loop. 7781 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7782 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7783 7784 // Generate code to check if the loop's trip count is less than VF * UF of the 7785 // main vector loop. 7786 auto P = 7787 Cost->requiresScalarEpilogue() ? 
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7788 7789 Value *CheckMinIters = Builder.CreateICmp( 7790 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 7791 "min.iters.check"); 7792 7793 if (!ForEpilogue) 7794 TCCheckBlock->setName("vector.main.loop.iter.check"); 7795 7796 // Create new preheader for vector loop. 7797 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7798 DT, LI, nullptr, "vector.ph"); 7799 7800 if (ForEpilogue) { 7801 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7802 DT->getNode(Bypass)->getIDom()) && 7803 "TC check is expected to dominate Bypass"); 7804 7805 // Update dominator for Bypass & LoopExit. 7806 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7807 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7808 7809 LoopBypassBlocks.push_back(TCCheckBlock); 7810 7811 // Save the trip count so we don't have to regenerate it in the 7812 // vec.epilog.iter.check. This is safe to do because the trip count 7813 // generated here dominates the vector epilog iter check. 7814 EPI.TripCount = Count; 7815 } 7816 7817 ReplaceInstWithInst( 7818 TCCheckBlock->getTerminator(), 7819 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7820 7821 return TCCheckBlock; 7822 } 7823 7824 //===--------------------------------------------------------------------===// 7825 // EpilogueVectorizerEpilogueLoop 7826 //===--------------------------------------------------------------------===// 7827 7828 /// This function is partially responsible for generating the control flow 7829 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7830 BasicBlock * 7831 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7832 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7833 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 7834 7835 // Now, compare the remaining count and if there aren't enough iterations to 7836 // execute the vectorized epilogue skip to the scalar part. 7837 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7838 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7839 LoopVectorPreHeader = 7840 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7841 LI, nullptr, "vec.epilog.ph"); 7842 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 7843 VecEpilogueIterationCountCheck); 7844 7845 // Adjust the control flow taking the state info from the main loop 7846 // vectorization into account. 
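  // In particular: the main loop's iteration-count check now branches
  // straight to the epilogue's vector preheader, while the epilogue
  // iteration-count check and any safety-check blocks are retargeted to the
  // scalar preheader.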
7847 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7848 "expected this to be saved from the previous pass."); 7849 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7850 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7851 7852 DT->changeImmediateDominator(LoopVectorPreHeader, 7853 EPI.MainLoopIterationCountCheck); 7854 7855 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7856 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7857 7858 if (EPI.SCEVSafetyCheck) 7859 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7860 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7861 if (EPI.MemSafetyCheck) 7862 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7863 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7864 7865 DT->changeImmediateDominator( 7866 VecEpilogueIterationCountCheck, 7867 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7868 7869 DT->changeImmediateDominator(LoopScalarPreHeader, 7870 EPI.EpilogueIterationCountCheck); 7871 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 7872 7873 // Keep track of bypass blocks, as they feed start values to the induction 7874 // phis in the scalar loop preheader. 7875 if (EPI.SCEVSafetyCheck) 7876 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7877 if (EPI.MemSafetyCheck) 7878 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7879 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7880 7881 // Generate a resume induction for the vector epilogue and put it in the 7882 // vector epilogue preheader 7883 Type *IdxTy = Legal->getWidestInductionType(); 7884 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 7885 LoopVectorPreHeader->getFirstNonPHI()); 7886 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 7887 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 7888 EPI.MainLoopIterationCountCheck); 7889 7890 // Generate the induction variable. 7891 OldInduction = Legal->getPrimaryInduction(); 7892 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 7893 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 7894 Value *StartIdx = EPResumeVal; 7895 Induction = 7896 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 7897 getDebugLocFromInstOrOperands(OldInduction)); 7898 7899 // Generate induction resume values. These variables save the new starting 7900 // indexes for the scalar loop. They are used to test if there are any tail 7901 // iterations left once the vector loop has completed. 7902 // Note that when the vectorized epilogue is skipped due to iteration count 7903 // check, then the resume value for the induction variable comes from 7904 // the trip count of the main vector loop, hence passing the AdditionalBypass 7905 // argument. 
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P =
      Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count,
      ConstantInt::get(Count->getType(),
                       EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
      "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
7998 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 7999 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8000 if (ECEntryIt != EdgeMaskCache.end()) 8001 return ECEntryIt->second; 8002 8003 VPValue *SrcMask = createBlockInMask(Src, Plan); 8004 8005 // The terminator has to be a branch inst! 8006 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8007 assert(BI && "Unexpected terminator found"); 8008 8009 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8010 return EdgeMaskCache[Edge] = SrcMask; 8011 8012 // If source is an exiting block, we know the exit edge is dynamically dead 8013 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8014 // adding uses of an otherwise potentially dead instruction. 8015 if (OrigLoop->isLoopExiting(Src)) 8016 return EdgeMaskCache[Edge] = SrcMask; 8017 8018 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8019 assert(EdgeMask && "No Edge Mask found for condition"); 8020 8021 if (BI->getSuccessor(0) != Dst) 8022 EdgeMask = Builder.createNot(EdgeMask); 8023 8024 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 8025 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 8026 8027 return EdgeMaskCache[Edge] = EdgeMask; 8028 } 8029 8030 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8031 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8032 8033 // Look for cached value. 8034 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8035 if (BCEntryIt != BlockMaskCache.end()) 8036 return BCEntryIt->second; 8037 8038 // All-one mask is modelled as no-mask following the convention for masked 8039 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8040 VPValue *BlockMask = nullptr; 8041 8042 if (OrigLoop->getHeader() == BB) { 8043 if (!CM.blockNeedsPredication(BB)) 8044 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8045 8046 // Create the block in mask as the first non-phi instruction in the block. 8047 VPBuilder::InsertPointGuard Guard(Builder); 8048 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8049 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8050 8051 // Introduce the early-exit compare IV <= BTC to form header block mask. 8052 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8053 // Start by constructing the desired canonical IV. 8054 VPValue *IV = nullptr; 8055 if (Legal->getPrimaryInduction()) 8056 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8057 else { 8058 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8059 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8060 IV = IVRecipe->getVPValue(); 8061 } 8062 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8063 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8064 8065 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8066 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8067 // as a second argument, we only pass the IV here and extract the 8068 // tripcount from the transform state where codegen of the VP instructions 8069 // happen. 8070 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8071 } else { 8072 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8073 } 8074 return BlockMaskCache[BB] = BlockMask; 8075 } 8076 8077 // This is the block mask. We OR all incoming edges. 
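  // A null edge mask means that edge is taken unconditionally (all-ones), so
  // the whole block mask collapses to all-ones and the loop below returns
  // early.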
8078 for (auto *Predecessor : predecessors(BB)) { 8079 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8080 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8081 return BlockMaskCache[BB] = EdgeMask; 8082 8083 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8084 BlockMask = EdgeMask; 8085 continue; 8086 } 8087 8088 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8089 } 8090 8091 return BlockMaskCache[BB] = BlockMask; 8092 } 8093 8094 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 8095 VPlanPtr &Plan) { 8096 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8097 "Must be called with either a load or store"); 8098 8099 auto willWiden = [&](ElementCount VF) -> bool { 8100 if (VF.isScalar()) 8101 return false; 8102 LoopVectorizationCostModel::InstWidening Decision = 8103 CM.getWideningDecision(I, VF); 8104 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8105 "CM decision should be taken at this point."); 8106 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8107 return true; 8108 if (CM.isScalarAfterVectorization(I, VF) || 8109 CM.isProfitableToScalarize(I, VF)) 8110 return false; 8111 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8112 }; 8113 8114 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8115 return nullptr; 8116 8117 VPValue *Mask = nullptr; 8118 if (Legal->isMaskRequired(I)) 8119 Mask = createBlockInMask(I->getParent(), Plan); 8120 8121 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 8122 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8123 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 8124 8125 StoreInst *Store = cast<StoreInst>(I); 8126 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 8127 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 8128 } 8129 8130 VPWidenIntOrFpInductionRecipe * 8131 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const { 8132 // Check if this is an integer or fp induction. If so, build the recipe that 8133 // produces its scalar and vector values. 8134 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8135 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8136 II.getKind() == InductionDescriptor::IK_FpInduction) { 8137 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8138 return new VPWidenIntOrFpInductionRecipe(Phi, Start); 8139 } 8140 8141 return nullptr; 8142 } 8143 8144 VPWidenIntOrFpInductionRecipe * 8145 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range, 8146 VPlan &Plan) const { 8147 // Optimize the special case where the source is a constant integer 8148 // induction variable. Notice that we can only optimize the 'trunc' case 8149 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8150 // (c) other casts depend on pointer size. 8151 8152 // Determine whether \p K is a truncation based on an induction variable that 8153 // can be optimized. 
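  // Hedged example of how getDecisionAndClampRange() behaves with a predicate
  // like the one built below (the VF numbers are invented): for
  // Range = {4, 32} and a predicate that holds for VF=4 and VF=8 but not for
  // VF=16, the call shrinks Range.End to 16 and returns true, so every VF
  // remaining in the range shares the same decision.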
8154 auto isOptimizableIVTruncate = 8155 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8156 return [=](ElementCount VF) -> bool { 8157 return CM.isOptimizableIVTruncate(K, VF); 8158 }; 8159 }; 8160 8161 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8162 isOptimizableIVTruncate(I), Range)) { 8163 8164 InductionDescriptor II = 8165 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8166 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8167 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8168 Start, I); 8169 } 8170 return nullptr; 8171 } 8172 8173 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 8174 // We know that all PHIs in non-header blocks are converted into selects, so 8175 // we don't have to worry about the insertion order and we can just use the 8176 // builder. At this point we generate the predication tree. There may be 8177 // duplications since this is a simple recursive scan, but future 8178 // optimizations will clean it up. 8179 8180 SmallVector<VPValue *, 2> Operands; 8181 unsigned NumIncoming = Phi->getNumIncomingValues(); 8182 for (unsigned In = 0; In < NumIncoming; In++) { 8183 VPValue *EdgeMask = 8184 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8185 assert((EdgeMask || NumIncoming == 1) && 8186 "Multiple predecessors with one having a full mask"); 8187 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 8188 if (EdgeMask) 8189 Operands.push_back(EdgeMask); 8190 } 8191 return new VPBlendRecipe(Phi, Operands); 8192 } 8193 8194 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 8195 VPlan &Plan) const { 8196 8197 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8198 [this, CI](ElementCount VF) { 8199 return CM.isScalarWithPredication(CI, VF); 8200 }, 8201 Range); 8202 8203 if (IsPredicated) 8204 return nullptr; 8205 8206 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8207 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8208 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8209 ID == Intrinsic::pseudoprobe)) 8210 return nullptr; 8211 8212 auto willWiden = [&](ElementCount VF) -> bool { 8213 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8214 // The following case may be scalarized depending on the VF. 8215 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8216 // version of the instruction. 8217 // Is it beneficial to perform intrinsic call compared to lib call? 8218 bool NeedToScalarize = false; 8219 unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8220 bool UseVectorIntrinsic = 8221 ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost; 8222 return UseVectorIntrinsic || !NeedToScalarize; 8223 }; 8224 8225 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8226 return nullptr; 8227 8228 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 8229 } 8230 8231 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8232 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8233 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8234 // Instruction should be widened, unless it is scalar after vectorization, 8235 // scalarization is profitable or it is predicated. 
8236 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8237 return CM.isScalarAfterVectorization(I, VF) || 8238 CM.isProfitableToScalarize(I, VF) || 8239 CM.isScalarWithPredication(I, VF); 8240 }; 8241 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8242 Range); 8243 } 8244 8245 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 8246 auto IsVectorizableOpcode = [](unsigned Opcode) { 8247 switch (Opcode) { 8248 case Instruction::Add: 8249 case Instruction::And: 8250 case Instruction::AShr: 8251 case Instruction::BitCast: 8252 case Instruction::FAdd: 8253 case Instruction::FCmp: 8254 case Instruction::FDiv: 8255 case Instruction::FMul: 8256 case Instruction::FNeg: 8257 case Instruction::FPExt: 8258 case Instruction::FPToSI: 8259 case Instruction::FPToUI: 8260 case Instruction::FPTrunc: 8261 case Instruction::FRem: 8262 case Instruction::FSub: 8263 case Instruction::ICmp: 8264 case Instruction::IntToPtr: 8265 case Instruction::LShr: 8266 case Instruction::Mul: 8267 case Instruction::Or: 8268 case Instruction::PtrToInt: 8269 case Instruction::SDiv: 8270 case Instruction::Select: 8271 case Instruction::SExt: 8272 case Instruction::Shl: 8273 case Instruction::SIToFP: 8274 case Instruction::SRem: 8275 case Instruction::Sub: 8276 case Instruction::Trunc: 8277 case Instruction::UDiv: 8278 case Instruction::UIToFP: 8279 case Instruction::URem: 8280 case Instruction::Xor: 8281 case Instruction::ZExt: 8282 return true; 8283 } 8284 return false; 8285 }; 8286 8287 if (!IsVectorizableOpcode(I->getOpcode())) 8288 return nullptr; 8289 8290 // Success: widen this instruction. 8291 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 8292 } 8293 8294 VPBasicBlock *VPRecipeBuilder::handleReplication( 8295 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8296 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8297 VPlanPtr &Plan) { 8298 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8299 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8300 Range); 8301 8302 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8303 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8304 Range); 8305 8306 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8307 IsUniform, IsPredicated); 8308 setRecipe(I, Recipe); 8309 Plan->addVPValue(I, Recipe); 8310 8311 // Find if I uses a predicated instruction. If so, it will use its scalar 8312 // value. Avoid hoisting the insert-element which packs the scalar value into 8313 // a vector value, as that happens iff all users use the vector value. 8314 for (auto &Op : I->operands()) 8315 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8316 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8317 PredInst2Recipe[PredInst]->setAlsoPack(false); 8318 8319 // Finalize the recipe for Instr, first if it is not predicated. 8320 if (!IsPredicated) { 8321 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8322 VPBB->appendRecipe(Recipe); 8323 return VPBB; 8324 } 8325 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8326 assert(VPBB->getSuccessors().empty() && 8327 "VPBB has successors when handling predicated replication."); 8328 // Record predicated instructions for above packing optimizations. 
8329 PredInst2Recipe[I] = Recipe; 8330 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8331 VPBlockUtils::insertBlockAfter(Region, VPBB); 8332 auto *RegSucc = new VPBasicBlock(); 8333 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8334 return RegSucc; 8335 } 8336 8337 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8338 VPRecipeBase *PredRecipe, 8339 VPlanPtr &Plan) { 8340 // Instructions marked for predication are replicated and placed under an 8341 // if-then construct to prevent side-effects. 8342 8343 // Generate recipes to compute the block mask for this region. 8344 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8345 8346 // Build the triangular if-then region. 8347 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8348 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8349 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8350 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8351 auto *PHIRecipe = Instr->getType()->isVoidTy() 8352 ? nullptr 8353 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8354 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8355 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8356 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8357 8358 // Note: first set Entry as region entry and then connect successors starting 8359 // from it in order, to propagate the "parent" of each VPBasicBlock. 8360 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8361 VPBlockUtils::connectBlocks(Pred, Exit); 8362 8363 return Region; 8364 } 8365 8366 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8367 VFRange &Range, 8368 VPlanPtr &Plan) { 8369 // First, check for specific widening recipes that deal with calls, memory 8370 // operations, inductions and Phi nodes. 
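  // For instance (invented ingredient): a  %mul = mul i32 %a, %b  matches
  // none of the specific cases below, so it reaches shouldWiden(); if no VF
  // in Range prefers scalarizing it, tryToWiden() wraps it in a VPWidenRecipe
  // over the VPValues of %a and %b.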
8371 if (auto *CI = dyn_cast<CallInst>(Instr)) 8372 return tryToWidenCall(CI, Range, *Plan); 8373 8374 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8375 return tryToWidenMemory(Instr, Range, Plan); 8376 8377 VPRecipeBase *Recipe; 8378 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8379 if (Phi->getParent() != OrigLoop->getHeader()) 8380 return tryToBlend(Phi, Plan); 8381 if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan))) 8382 return Recipe; 8383 8384 if (Legal->isReductionVariable(Phi)) { 8385 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 8386 return new VPWidenPHIRecipe(Phi, RdxDesc); 8387 } 8388 8389 return new VPWidenPHIRecipe(Phi); 8390 } 8391 8392 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate( 8393 cast<TruncInst>(Instr), Range, *Plan))) 8394 return Recipe; 8395 8396 if (!shouldWiden(Instr, Range)) 8397 return nullptr; 8398 8399 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8400 return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()), 8401 OrigLoop); 8402 8403 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8404 bool InvariantCond = 8405 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8406 return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()), 8407 InvariantCond); 8408 } 8409 8410 return tryToWiden(Instr, *Plan); 8411 } 8412 8413 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8414 ElementCount MaxVF) { 8415 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8416 8417 // Collect instructions from the original loop that will become trivially dead 8418 // in the vectorized loop. We don't need to vectorize these instructions. For 8419 // example, original induction update instructions can become dead because we 8420 // separately emit induction "steps" when generating code for the new loop. 8421 // Similarly, we create a new latch condition when setting up the structure 8422 // of the new loop, so the old one can become dead. 8423 SmallPtrSet<Instruction *, 4> DeadInstructions; 8424 collectTriviallyDeadInstructions(DeadInstructions); 8425 8426 // Add assume instructions we need to drop to DeadInstructions, to prevent 8427 // them from being added to the VPlan. 8428 // TODO: We only need to drop assumes in blocks that get flattend. If the 8429 // control flow is preserved, we should keep them. 8430 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 8431 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 8432 8433 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8434 // Dead instructions do not need sinking. Remove them from SinkAfter. 8435 for (Instruction *I : DeadInstructions) 8436 SinkAfter.erase(I); 8437 8438 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8439 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8440 VFRange SubRange = {VF, MaxVFPlusOne}; 8441 VPlans.push_back( 8442 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8443 VF = SubRange.End; 8444 } 8445 } 8446 8447 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8448 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8449 const DenseMap<Instruction *, Instruction *> &SinkAfter) { 8450 8451 // Hold a mapping from predicated instructions to their recipes, in order to 8452 // fix their AlsoPack behavior if a user is determined to replicate and use a 8453 // scalar instead of vector value. 
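  // Illustrative, with invented IR: if a predicated  %q = sdiv i32 %x, %y  is
  // used by an instruction that is itself replicated (and thus consumes
  // scalar values), there is no need to pack %q's per-lane results into a
  // vector; handleReplication() uses this map to clear the AlsoPack flag on
  // %q's VPReplicateRecipe when that user is visited.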
  DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and add
  // placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
  auto Plan = std::make_unique<VPlan>();
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  Plan->setEntry(VPBB);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
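    // Illustrative sketch (the IR below is invented): a body block such as
    //   %iv  = phi i64 [ 0, %ph ], [ %iv.next, %body ]
    //   %gep = getelementptr inbounds i32, i32* %A, i64 %iv
    //   %l   = load i32, i32* %gep
    //   %add = add i32 %l, 1
    // typically ends up as one VPBasicBlock holding a widened induction
    // recipe, a VPWidenGEPRecipe, a VPWidenMemoryInstructionRecipe and a
    // VPWidenRecipe, appended below in that order.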
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      if (auto Recipe =
              RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, Instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
          Instr, Range, VPBB, PredInst2Recipe, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last VPBB, reflecting original
  // basic-blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
    // If the target is in a replication region, make sure to move Sink to the
    // block after it, not into the replication region itself.
    if (auto *Region =
            dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
      if (Region->isReplicator()) {
        assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
        VPBasicBlock *NextBlock =
            cast<VPBasicBlock>(Region->getSuccessors().front());
        Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
        continue;
      }
    }
    Sink->moveAfter(Target);
  }

  // Interleave memory: for each Interleave Group we marked earlier as relevant
  // for this VPlan, replace the Recipes widening its memory instructions with a
  // single VPInterleaveRecipe at its insertion point.
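  // Illustrative example (strides and types invented): for a factor-2 group
  //   %even = load i32, i32* %gep0      ; A[2*i]
  //   %odd  = load i32, i32* %gep1      ; A[2*i+1]
  // the two VPWidenMemoryInstructionRecipes created earlier are erased and a
  // single VPInterleaveRecipe anchored at the group's insert position takes
  // their place, later emitting one wide load plus shufflevectors that
  // de-interleave the even/odd lanes.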
8598 for (auto IG : InterleaveGroups) { 8599 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8600 RecipeBuilder.getRecipe(IG->getInsertPos())); 8601 SmallVector<VPValue *, 4> StoredValues; 8602 for (unsigned i = 0; i < IG->getFactor(); ++i) 8603 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 8604 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 8605 8606 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8607 Recipe->getMask()); 8608 VPIG->insertBefore(Recipe); 8609 unsigned J = 0; 8610 for (unsigned i = 0; i < IG->getFactor(); ++i) 8611 if (Instruction *Member = IG->getMember(i)) { 8612 if (!Member->getType()->isVoidTy()) { 8613 VPValue *OriginalV = Plan->getVPValue(Member); 8614 Plan->removeVPValueFor(Member); 8615 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8616 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8617 J++; 8618 } 8619 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 8620 } 8621 } 8622 8623 // Adjust the recipes for any inloop reductions. 8624 if (Range.Start.isVector()) 8625 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 8626 8627 // Finally, if tail is folded by masking, introduce selects between the phi 8628 // and the live-out instruction of each reduction, at the end of the latch. 8629 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 8630 Builder.setInsertPoint(VPBB); 8631 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 8632 for (auto &Reduction : Legal->getReductionVars()) { 8633 if (CM.isInLoopReduction(Reduction.first)) 8634 continue; 8635 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 8636 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 8637 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 8638 } 8639 } 8640 8641 std::string PlanName; 8642 raw_string_ostream RSO(PlanName); 8643 ElementCount VF = Range.Start; 8644 Plan->addVF(VF); 8645 RSO << "Initial VPlan for VF={" << VF; 8646 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 8647 Plan->addVF(VF); 8648 RSO << "," << VF; 8649 } 8650 RSO << "},UF>=1"; 8651 RSO.flush(); 8652 Plan->setName(PlanName); 8653 8654 return Plan; 8655 } 8656 8657 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8658 // Outer loop handling: They may require CFG and instruction level 8659 // transformations before even evaluating whether vectorization is profitable. 8660 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8661 // the vectorization pipeline. 8662 assert(!OrigLoop->isInnermost()); 8663 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 8664 8665 // Create new empty VPlan 8666 auto Plan = std::make_unique<VPlan>(); 8667 8668 // Build hierarchical CFG 8669 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 8670 HCFGBuilder.buildHierarchicalCFG(); 8671 8672 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 8673 VF *= 2) 8674 Plan->addVF(VF); 8675 8676 if (EnableVPlanPredication) { 8677 VPlanPredicator VPP(*Plan); 8678 VPP.predicate(); 8679 8680 // Avoid running transformation to recipes until masked code generation in 8681 // VPlan-native path is in place. 8682 return Plan; 8683 } 8684 8685 SmallPtrSet<Instruction *, 1> DeadInstructions; 8686 VPlanTransforms::VPInstructionsToVPRecipes( 8687 OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions); 8688 return Plan; 8689 } 8690 8691 // Adjust the recipes for any inloop reductions. 
// The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
    VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert(isa<VPWidenRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;
      VPReductionRecipe *RedRecipe = new VPReductionRecipe(
          &RdxDesc, R, ChainOp, VecOp, CondOp, Legal->hasFunNoNaNAttr(), TTI);
      WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->eraseFromParent();

      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        VPRecipeBase *CompareRecipe =
            RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
        assert(isa<VPWidenRecipe>(CompareRecipe) &&
               "Expected to replace a VPWidenSC");
        assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
               "Expected no remaining users");
        CompareRecipe->eraseFromParent();
      }
      Chain = R;
    }
  }
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
    Value *V, const VPIteration &Instance) {
  return ILV.getOrCreateScalarValue(V, Instance);
}

void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << "\\l\" +\n" << Indent <<
"\" " << VPlanIngredient(I) << " " << i; 8776 } 8777 8778 void VPWidenCallRecipe::execute(VPTransformState &State) { 8779 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 8780 *this, State); 8781 } 8782 8783 void VPWidenSelectRecipe::execute(VPTransformState &State) { 8784 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 8785 this, *this, InvariantCond, State); 8786 } 8787 8788 void VPWidenRecipe::execute(VPTransformState &State) { 8789 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 8790 } 8791 8792 void VPWidenGEPRecipe::execute(VPTransformState &State) { 8793 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 8794 *this, State.UF, State.VF, IsPtrLoopInvariant, 8795 IsIndexLoopInvariant, State); 8796 } 8797 8798 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 8799 assert(!State.Instance && "Int or FP induction being replicated."); 8800 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 8801 Trunc); 8802 } 8803 8804 void VPWidenPHIRecipe::execute(VPTransformState &State) { 8805 Value *StartV = nullptr; 8806 if (RdxDesc) 8807 StartV = RdxDesc->getRecurrenceStartValue(); 8808 State.ILV->widenPHIInstruction(Phi, RdxDesc, StartV, State.UF, State.VF); 8809 } 8810 8811 void VPBlendRecipe::execute(VPTransformState &State) { 8812 State.ILV->setDebugLocFromInst(State.Builder, Phi); 8813 // We know that all PHIs in non-header blocks are converted into 8814 // selects, so we don't have to worry about the insertion order and we 8815 // can just use the builder. 8816 // At this point we generate the predication tree. There may be 8817 // duplications since this is a simple recursive scan, but future 8818 // optimizations will clean it up. 8819 8820 unsigned NumIncoming = getNumIncomingValues(); 8821 8822 // Generate a sequence of selects of the form: 8823 // SELECT(Mask3, In3, 8824 // SELECT(Mask2, In2, 8825 // SELECT(Mask1, In1, 8826 // In0))) 8827 // Note that Mask0 is never used: lanes for which no path reaches this phi and 8828 // are essentially undef are taken from In0. 8829 InnerLoopVectorizer::VectorParts Entry(State.UF); 8830 for (unsigned In = 0; In < NumIncoming; ++In) { 8831 for (unsigned Part = 0; Part < State.UF; ++Part) { 8832 // We might have single edge PHIs (blocks) - use an identity 8833 // 'select' for the first PHI operand. 8834 Value *In0 = State.get(getIncomingValue(In), Part); 8835 if (In == 0) 8836 Entry[Part] = In0; // Initialize with the first incoming value. 8837 else { 8838 // Select between the current value and the previous incoming edge 8839 // based on the incoming mask. 
8840 Value *Cond = State.get(getMask(In), Part); 8841 Entry[Part] = 8842 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 8843 } 8844 } 8845 } 8846 for (unsigned Part = 0; Part < State.UF; ++Part) 8847 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 8848 } 8849 8850 void VPInterleaveRecipe::execute(VPTransformState &State) { 8851 assert(!State.Instance && "Interleave group being replicated."); 8852 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 8853 getStoredValues(), getMask()); 8854 } 8855 8856 void VPReductionRecipe::execute(VPTransformState &State) { 8857 assert(!State.Instance && "Reduction being replicated."); 8858 for (unsigned Part = 0; Part < State.UF; ++Part) { 8859 RecurKind Kind = RdxDesc->getRecurrenceKind(); 8860 Value *NewVecOp = State.get(getVecOp(), Part); 8861 if (VPValue *Cond = getCondOp()) { 8862 Value *NewCond = State.get(Cond, Part); 8863 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 8864 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 8865 Kind, VecTy->getElementType()); 8866 Constant *IdenVec = 8867 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 8868 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 8869 NewVecOp = Select; 8870 } 8871 Value *NewRed = 8872 createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 8873 Value *PrevInChain = State.get(getChainOp(), Part); 8874 Value *NextInChain; 8875 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 8876 NextInChain = 8877 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 8878 NewRed, PrevInChain); 8879 } else { 8880 NextInChain = State.Builder.CreateBinOp( 8881 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 8882 PrevInChain); 8883 } 8884 State.set(this, getUnderlyingInstr(), NextInChain, Part); 8885 } 8886 } 8887 8888 void VPReplicateRecipe::execute(VPTransformState &State) { 8889 if (State.Instance) { // Generate a single instance. 8890 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 8891 State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this, 8892 *State.Instance, IsPredicated, State); 8893 // Insert scalar instance packing it into a vector. 8894 if (AlsoPack && State.VF.isVector()) { 8895 // If we're constructing lane 0, initialize to start from poison. 8896 if (State.Instance->Lane == 0) { 8897 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 8898 Value *Poison = PoisonValue::get( 8899 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 8900 State.ValueMap.setVectorValue(getUnderlyingInstr(), 8901 State.Instance->Part, Poison); 8902 } 8903 State.ILV->packScalarIntoVectorValue(getUnderlyingInstr(), 8904 *State.Instance); 8905 } 8906 return; 8907 } 8908 8909 // Generate scalar instances for all VF lanes of all UF parts, unless the 8910 // instruction is uniform inwhich case generate only the first lane for each 8911 // of the UF parts. 8912 unsigned EndLane = IsUniform ? 
1 : State.VF.getKnownMinValue(); 8913 assert((!State.VF.isScalable() || IsUniform) && 8914 "Can't scalarize a scalable vector"); 8915 for (unsigned Part = 0; Part < State.UF; ++Part) 8916 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 8917 State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this, {Part, Lane}, 8918 IsPredicated, State); 8919 } 8920 8921 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 8922 assert(State.Instance && "Branch on Mask works only on single instance."); 8923 8924 unsigned Part = State.Instance->Part; 8925 unsigned Lane = State.Instance->Lane; 8926 8927 Value *ConditionBit = nullptr; 8928 VPValue *BlockInMask = getMask(); 8929 if (BlockInMask) { 8930 ConditionBit = State.get(BlockInMask, Part); 8931 if (ConditionBit->getType()->isVectorTy()) 8932 ConditionBit = State.Builder.CreateExtractElement( 8933 ConditionBit, State.Builder.getInt32(Lane)); 8934 } else // Block in mask is all-one. 8935 ConditionBit = State.Builder.getTrue(); 8936 8937 // Replace the temporary unreachable terminator with a new conditional branch, 8938 // whose two destinations will be set later when they are created. 8939 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 8940 assert(isa<UnreachableInst>(CurrentTerminator) && 8941 "Expected to replace unreachable terminator with conditional branch."); 8942 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 8943 CondBr->setSuccessor(0, nullptr); 8944 ReplaceInstWithInst(CurrentTerminator, CondBr); 8945 } 8946 8947 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 8948 assert(State.Instance && "Predicated instruction PHI works per instance."); 8949 Instruction *ScalarPredInst = 8950 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 8951 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 8952 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 8953 assert(PredicatingBB && "Predicated block has no single predecessor."); 8954 8955 // By current pack/unpack logic we need to generate only a single phi node: if 8956 // a vector value for the predicated instruction exists at this point it means 8957 // the instruction has vector users only, and a phi for the vector value is 8958 // needed. In this case the recipe of the predicated instruction is marked to 8959 // also do that packing, thereby "hoisting" the insert-element sequence. 8960 // Otherwise, a phi node for the scalar value is needed. 8961 unsigned Part = State.Instance->Part; 8962 Instruction *PredInst = 8963 cast<Instruction>(getOperand(0)->getUnderlyingValue()); 8964 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 8965 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 8966 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 8967 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 8968 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 8969 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 8970 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 
8971 } else { 8972 Type *PredInstType = PredInst->getType(); 8973 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 8974 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), PredicatingBB); 8975 Phi->addIncoming(ScalarPredInst, PredicatedBB); 8976 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 8977 } 8978 } 8979 8980 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 8981 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 8982 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 8983 StoredValue ? nullptr : getVPValue(), 8984 getAddr(), StoredValue, getMask()); 8985 } 8986 8987 // Determine how to lower the scalar epilogue, which depends on 1) optimising 8988 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 8989 // predication, and 4) a TTI hook that analyses whether the loop is suitable 8990 // for predication. 8991 static ScalarEpilogueLowering getScalarEpilogueLowering( 8992 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 8993 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 8994 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 8995 LoopVectorizationLegality &LVL) { 8996 // 1) OptSize takes precedence over all other options, i.e. if this is set, 8997 // don't look at hints or options, and don't request a scalar epilogue. 8998 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 8999 // LoopAccessInfo (due to code dependency and not being able to reliably get 9000 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9001 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9002 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9003 // back to the old way and vectorize with versioning when forced. See D81345.) 9004 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9005 PGSOQueryType::IRPass) && 9006 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9007 return CM_ScalarEpilogueNotAllowedOptSize; 9008 9009 // 2) If set, obey the directives 9010 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9011 switch (PreferPredicateOverEpilogue) { 9012 case PreferPredicateTy::ScalarEpilogue: 9013 return CM_ScalarEpilogueAllowed; 9014 case PreferPredicateTy::PredicateElseScalarEpilogue: 9015 return CM_ScalarEpilogueNotNeededUsePredicate; 9016 case PreferPredicateTy::PredicateOrDontVectorize: 9017 return CM_ScalarEpilogueNotAllowedUsePredicate; 9018 }; 9019 } 9020 9021 // 3) If set, obey the hints 9022 switch (Hints.getPredicate()) { 9023 case LoopVectorizeHints::FK_Enabled: 9024 return CM_ScalarEpilogueNotNeededUsePredicate; 9025 case LoopVectorizeHints::FK_Disabled: 9026 return CM_ScalarEpilogueAllowed; 9027 }; 9028 9029 // 4) if the TTI hook indicates this is profitable, request predication. 9030 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9031 LVL.getLAI())) 9032 return CM_ScalarEpilogueNotNeededUsePredicate; 9033 9034 return CM_ScalarEpilogueAllowed; 9035 } 9036 9037 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V, 9038 unsigned Part) { 9039 set(Def, V, Part); 9040 ILV->setVectorValue(IRDef, Part, V); 9041 } 9042 9043 // Process the loop in the VPlan-native vectorization path. 
This path builds 9044 // VPlan upfront in the vectorization pipeline, which allows to apply 9045 // VPlan-to-VPlan transformations from the very beginning without modifying the 9046 // input LLVM IR. 9047 static bool processLoopInVPlanNativePath( 9048 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 9049 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 9050 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 9051 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 9052 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 9053 9054 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 9055 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 9056 return false; 9057 } 9058 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 9059 Function *F = L->getHeader()->getParent(); 9060 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 9061 9062 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9063 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 9064 9065 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 9066 &Hints, IAI); 9067 // Use the planner for outer loop vectorization. 9068 // TODO: CM is not used at this point inside the planner. Turn CM into an 9069 // optional argument if we don't need it in the future. 9070 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 9071 9072 // Get user vectorization factor. 9073 ElementCount UserVF = Hints.getWidth(); 9074 9075 // Plan how to best vectorize, return the best VF and its cost. 9076 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 9077 9078 // If we are stress testing VPlan builds, do not attempt to generate vector 9079 // code. Masked vector code generation support will follow soon. 9080 // Also, do not attempt to vectorize if no vector code will be produced. 9081 if (VPlanBuildStressTest || EnableVPlanPredication || 9082 VectorizationFactor::Disabled() == VF) 9083 return false; 9084 9085 LVP.setBestPlan(VF.Width, 1); 9086 9087 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 9088 &CM, BFI, PSI); 9089 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 9090 << L->getHeader()->getParent()->getName() << "\"\n"); 9091 LVP.executePlan(LB, DT); 9092 9093 // Mark the loop as already vectorized to avoid vectorizing again. 9094 Hints.setAlreadyVectorized(); 9095 9096 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9097 return true; 9098 } 9099 9100 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 9101 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 9102 !EnableLoopInterleaving), 9103 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 9104 !EnableLoopVectorization) {} 9105 9106 bool LoopVectorizePass::processLoop(Loop *L) { 9107 assert((EnableVPlanNativePath || L->isInnermost()) && 9108 "VPlan-native path is not enabled. Only process inner loops."); 9109 9110 #ifndef NDEBUG 9111 const std::string DebugLocStr = getDebugLocString(L); 9112 #endif /* NDEBUG */ 9113 9114 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 9115 << L->getHeader()->getParent()->getName() << "\" from " 9116 << DebugLocStr << "\n"); 9117 9118 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 9119 9120 LLVM_DEBUG( 9121 dbgs() << "LV: Loop hints:" 9122 << " force=" 9123 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 9124 ? "disabled" 9125 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 9126 ? 
"enabled" 9127 : "?")) 9128 << " width=" << Hints.getWidth() 9129 << " unroll=" << Hints.getInterleave() << "\n"); 9130 9131 // Function containing loop 9132 Function *F = L->getHeader()->getParent(); 9133 9134 // Looking at the diagnostic output is the only way to determine if a loop 9135 // was vectorized (other than looking at the IR or machine code), so it 9136 // is important to generate an optimization remark for each loop. Most of 9137 // these messages are generated as OptimizationRemarkAnalysis. Remarks 9138 // generated as OptimizationRemark and OptimizationRemarkMissed are 9139 // less verbose reporting vectorized loops and unvectorized loops that may 9140 // benefit from vectorization, respectively. 9141 9142 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 9143 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 9144 return false; 9145 } 9146 9147 PredicatedScalarEvolution PSE(*SE, *L); 9148 9149 // Check if it is legal to vectorize the loop. 9150 LoopVectorizationRequirements Requirements(*ORE); 9151 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 9152 &Requirements, &Hints, DB, AC, BFI, PSI); 9153 if (!LVL.canVectorize(EnableVPlanNativePath)) { 9154 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 9155 Hints.emitRemarkWithHints(); 9156 return false; 9157 } 9158 9159 // Check the function attributes and profiles to find out if this function 9160 // should be optimized for size. 9161 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9162 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 9163 9164 // Entrance to the VPlan-native vectorization path. Outer loops are processed 9165 // here. They may require CFG and instruction level transformations before 9166 // even evaluating whether vectorization is profitable. Since we cannot modify 9167 // the incoming IR, we need to build VPlan upfront in the vectorization 9168 // pipeline. 9169 if (!L->isInnermost()) 9170 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 9171 ORE, BFI, PSI, Hints); 9172 9173 assert(L->isInnermost() && "Inner loop expected."); 9174 9175 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 9176 // count by optimizing for size, to minimize overheads. 9177 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 9178 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 9179 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 9180 << "This loop is worth vectorizing only if no scalar " 9181 << "iteration overheads are incurred."); 9182 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 9183 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 9184 else { 9185 LLVM_DEBUG(dbgs() << "\n"); 9186 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 9187 } 9188 } 9189 9190 // Check the function attributes to see if implicit floats are allowed. 9191 // FIXME: This check doesn't seem possibly correct -- what if the loop is 9192 // an integer loop and the vector instructions selected are purely integer 9193 // vector instructions? 9194 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 9195 reportVectorizationFailure( 9196 "Can't vectorize when the NoImplicitFloat attribute is used", 9197 "loop not vectorized due to NoImplicitFloat attribute", 9198 "NoImplicitFloat", ORE, L); 9199 Hints.emitRemarkWithHints(); 9200 return false; 9201 } 9202 9203 // Check if the target supports potentially unsafe FP vectorization. 
9204 // FIXME: Add a check for the type of safety issue (denormal, signaling) 9205 // for the target we're vectorizing for, to make sure none of the 9206 // additional fp-math flags can help. 9207 if (Hints.isPotentiallyUnsafe() && 9208 TTI->isFPVectorizationPotentiallyUnsafe()) { 9209 reportVectorizationFailure( 9210 "Potentially unsafe FP op prevents vectorization", 9211 "loop not vectorized due to unsafe FP support.", 9212 "UnsafeFP", ORE, L); 9213 Hints.emitRemarkWithHints(); 9214 return false; 9215 } 9216 9217 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 9218 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 9219 9220 // If an override option has been passed in for interleaved accesses, use it. 9221 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 9222 UseInterleaved = EnableInterleavedMemAccesses; 9223 9224 // Analyze interleaved memory accesses. 9225 if (UseInterleaved) { 9226 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 9227 } 9228 9229 // Use the cost model. 9230 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 9231 F, &Hints, IAI); 9232 CM.collectValuesToIgnore(); 9233 9234 // Use the planner for vectorization. 9235 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE); 9236 9237 // Get user vectorization factor and interleave count. 9238 ElementCount UserVF = Hints.getWidth(); 9239 unsigned UserIC = Hints.getInterleave(); 9240 9241 // Plan how to best vectorize, return the best VF and its cost. 9242 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 9243 9244 VectorizationFactor VF = VectorizationFactor::Disabled(); 9245 unsigned IC = 1; 9246 9247 if (MaybeVF) { 9248 VF = *MaybeVF; 9249 // Select the interleave count. 9250 IC = CM.selectInterleaveCount(VF.Width, VF.Cost); 9251 } 9252 9253 // Identify the diagnostic messages that should be produced. 9254 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 9255 bool VectorizeLoop = true, InterleaveLoop = true; 9256 if (Requirements.doesNotMeet(F, L, Hints)) { 9257 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 9258 "requirements.\n"); 9259 Hints.emitRemarkWithHints(); 9260 return false; 9261 } 9262 9263 if (VF.Width.isScalar()) { 9264 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 9265 VecDiagMsg = std::make_pair( 9266 "VectorizationNotBeneficial", 9267 "the cost-model indicates that vectorization is not beneficial"); 9268 VectorizeLoop = false; 9269 } 9270 9271 if (!MaybeVF && UserIC > 1) { 9272 // Tell the user interleaving was avoided up-front, despite being explicitly 9273 // requested. 9274 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 9275 "interleaving should be avoided up front\n"); 9276 IntDiagMsg = std::make_pair( 9277 "InterleavingAvoided", 9278 "Ignoring UserIC, because interleaving was avoided up front"); 9279 InterleaveLoop = false; 9280 } else if (IC == 1 && UserIC <= 1) { 9281 // Tell the user interleaving is not beneficial. 
9282 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 9283 IntDiagMsg = std::make_pair( 9284 "InterleavingNotBeneficial", 9285 "the cost-model indicates that interleaving is not beneficial"); 9286 InterleaveLoop = false; 9287 if (UserIC == 1) { 9288 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 9289 IntDiagMsg.second += 9290 " and is explicitly disabled or interleave count is set to 1"; 9291 } 9292 } else if (IC > 1 && UserIC == 1) { 9293 // Tell the user interleaving is beneficial, but it explicitly disabled. 9294 LLVM_DEBUG( 9295 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 9296 IntDiagMsg = std::make_pair( 9297 "InterleavingBeneficialButDisabled", 9298 "the cost-model indicates that interleaving is beneficial " 9299 "but is explicitly disabled or interleave count is set to 1"); 9300 InterleaveLoop = false; 9301 } 9302 9303 // Override IC if user provided an interleave count. 9304 IC = UserIC > 0 ? UserIC : IC; 9305 9306 // Emit diagnostic messages, if any. 9307 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 9308 if (!VectorizeLoop && !InterleaveLoop) { 9309 // Do not vectorize or interleaving the loop. 9310 ORE->emit([&]() { 9311 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 9312 L->getStartLoc(), L->getHeader()) 9313 << VecDiagMsg.second; 9314 }); 9315 ORE->emit([&]() { 9316 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 9317 L->getStartLoc(), L->getHeader()) 9318 << IntDiagMsg.second; 9319 }); 9320 return false; 9321 } else if (!VectorizeLoop && InterleaveLoop) { 9322 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 9323 ORE->emit([&]() { 9324 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 9325 L->getStartLoc(), L->getHeader()) 9326 << VecDiagMsg.second; 9327 }); 9328 } else if (VectorizeLoop && !InterleaveLoop) { 9329 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 9330 << ") in " << DebugLocStr << '\n'); 9331 ORE->emit([&]() { 9332 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 9333 L->getStartLoc(), L->getHeader()) 9334 << IntDiagMsg.second; 9335 }); 9336 } else if (VectorizeLoop && InterleaveLoop) { 9337 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 9338 << ") in " << DebugLocStr << '\n'); 9339 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 9340 } 9341 9342 LVP.setBestPlan(VF.Width, IC); 9343 9344 using namespace ore; 9345 bool DisableRuntimeUnroll = false; 9346 MDNode *OrigLoopID = L->getLoopID(); 9347 9348 if (!VectorizeLoop) { 9349 assert(IC > 1 && "interleave count should not be 1 or 0"); 9350 // If we decided that it is not legal to vectorize the loop, then 9351 // interleave it. 9352 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM, 9353 BFI, PSI); 9354 LVP.executePlan(Unroller, DT); 9355 9356 ORE->emit([&]() { 9357 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 9358 L->getHeader()) 9359 << "interleaved loop (interleaved count: " 9360 << NV("InterleaveCount", IC) << ")"; 9361 }); 9362 } else { 9363 // If we decided that it is *legal* to vectorize the loop, then do it. 9364 9365 // Consider vectorizing the epilogue too if it's profitable. 
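    // Rough sketch of the two-pass flow below (the VF/UF numbers are only an
    // example): if the main loop was selected with VF=8,UF=2 and the cost
    // model picks EpilogueVF=4, the first executePlan() emits the VF=8 main
    // vector loop plus a scalar remainder, and the second executePlan()
    // re-vectorizes that remainder at VF=4,UF=1, reusing the trip-count
    // values carried in EpilogueLoopVectorizationInfo for the iteration-count
    // checks.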
9366 VectorizationFactor EpilogueVF = 9367 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 9368 if (EpilogueVF.Width.isVector()) { 9369 9370 // The first pass vectorizes the main loop and creates a scalar epilogue 9371 // to be vectorized by executing the plan (potentially with a different 9372 // factor) again shortly afterwards. 9373 EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC, 9374 EpilogueVF.Width.getKnownMinValue(), 1); 9375 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI, 9376 &LVL, &CM, BFI, PSI); 9377 9378 LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF); 9379 LVP.executePlan(MainILV, DT); 9380 ++LoopsVectorized; 9381 9382 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 9383 formLCSSARecursively(*L, *DT, LI, SE); 9384 9385 // Second pass vectorizes the epilogue and adjusts the control flow 9386 // edges from the first pass. 9387 LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF); 9388 EPI.MainLoopVF = EPI.EpilogueVF; 9389 EPI.MainLoopUF = EPI.EpilogueUF; 9390 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 9391 ORE, EPI, &LVL, &CM, BFI, PSI); 9392 LVP.executePlan(EpilogILV, DT); 9393 ++LoopsEpilogueVectorized; 9394 9395 if (!MainILV.areSafetyChecksAdded()) 9396 DisableRuntimeUnroll = true; 9397 } else { 9398 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 9399 &LVL, &CM, BFI, PSI); 9400 LVP.executePlan(LB, DT); 9401 ++LoopsVectorized; 9402 9403 // Add metadata to disable runtime unrolling a scalar loop when there are 9404 // no runtime checks about strides and memory. A scalar loop that is 9405 // rarely used is not worth unrolling. 9406 if (!LB.areSafetyChecksAdded()) 9407 DisableRuntimeUnroll = true; 9408 } 9409 9410 // Report the vectorization decision. 9411 ORE->emit([&]() { 9412 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 9413 L->getHeader()) 9414 << "vectorized loop (vectorization width: " 9415 << NV("VectorizationFactor", VF.Width) 9416 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 9417 }); 9418 } 9419 9420 Optional<MDNode *> RemainderLoopID = 9421 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 9422 LLVMLoopVectorizeFollowupEpilogue}); 9423 if (RemainderLoopID.hasValue()) { 9424 L->setLoopID(RemainderLoopID.getValue()); 9425 } else { 9426 if (DisableRuntimeUnroll) 9427 AddRuntimeUnrollDisableMetaData(L); 9428 9429 // Mark the loop as already vectorized to avoid vectorizing again. 9430 Hints.setAlreadyVectorized(); 9431 } 9432 9433 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9434 return true; 9435 } 9436 9437 LoopVectorizeResult LoopVectorizePass::runImpl( 9438 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 9439 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 9440 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 9441 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 9442 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 9443 SE = &SE_; 9444 LI = &LI_; 9445 TTI = &TTI_; 9446 DT = &DT_; 9447 BFI = &BFI_; 9448 TLI = TLI_; 9449 AA = &AA_; 9450 AC = &AC_; 9451 GetLAA = &GetLAA_; 9452 DB = &DB_; 9453 ORE = &ORE_; 9454 PSI = PSI_; 9455 9456 // Don't attempt if 9457 // 1. the target claims to have no vector registers, and 9458 // 2. interleaving won't help ILP. 
LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt vectorization if:
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

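// A minimal sketch of scheduling this pass through the new pass manager
// (illustrative only; assumes the analysis managers have already been set up
// and cross-registered via the usual PassBuilder boilerplate):
//
//   FunctionPassManager FPM;
//   FPM.addPass(LoopVectorizePass());
//   FPM.run(F, FAM); // FAM: the FunctionAnalysisManager for F's module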
PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loop-info/dominator analyses with outer-loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for the VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}