//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
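//
// As a rough illustration of the widening described above (not drawn from
// the papers; the loop and VF are made up for the example), a scalar loop
// such as:
//
//   for (i = 0; i < n; i += 1)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten, for VF = 4, into one wide iteration per four
// scalar iterations plus a scalar remainder (epilogue) loop:
//
//   for (i = 0; i + 4 <= n; i += 4)
//     a[i:i+3] = b[i:i+3] + c[i:i+3]; // one SIMD operation per group of 4
//   for (; i < n; i += 1)
//     a[i] = b[i] + c[i];             // scalar epilogue for the remainder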
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 202 // that predication is preferred, and this lists all options. I.e., the 203 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 204 // and predicate the instructions accordingly. 
/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred: the vectorizer will try to
// fold the tail loop (epilogue) into the vector body and predicate the
// instructions accordingly. If tail-folding fails, there are different
// fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
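// An illustrative sketch of tail-folding (VF = 4; the loop and bound are
// made up for the example). Instead of emitting a scalar epilogue for the
// last n % 4 iterations, the vector body itself is predicated so it can
// safely overshoot:
//
//   for (i = 0; i < n; i += 4) {
//     mask = (i + <0,1,2,3>) < n;        // inactive lanes are masked off
//     v = masked.load(&b[i], mask);
//     masked.store(v + 1, &a[i], mask);  // a[i] = b[i] + 1, predicated
//   }
//
// This removes the remainder loop entirely, at the cost of executing every
// memory access through a mask.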
Mostly " 274 "useful for getting consistent testing.")); 275 276 static cl::opt<bool> ForceTargetSupportsScalableVectors( 277 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 278 cl::desc( 279 "Pretend that scalable vectors are supported, even if the target does " 280 "not support them. This flag should only be used for testing.")); 281 282 static cl::opt<unsigned> SmallLoopCost( 283 "small-loop-cost", cl::init(20), cl::Hidden, 284 cl::desc( 285 "The cost of a loop that is considered 'small' by the interleaver.")); 286 287 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 288 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 289 cl::desc("Enable the use of the block frequency analysis to access PGO " 290 "heuristics minimizing code growth in cold regions and being more " 291 "aggressive in hot regions.")); 292 293 // Runtime interleave loops for load/store throughput. 294 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 295 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 296 cl::desc( 297 "Enable runtime interleaving until load/store ports are saturated")); 298 299 /// Interleave small loops with scalar reductions. 300 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 301 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 302 cl::desc("Enable interleaving for loops with small iteration counts that " 303 "contain scalar reductions to expose ILP.")); 304 305 /// The number of stores in a loop that are allowed to need predication. 306 static cl::opt<unsigned> NumberOfStoresToPredicate( 307 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 308 cl::desc("Max number of stores to be predicated behind an if.")); 309 310 static cl::opt<bool> EnableIndVarRegisterHeur( 311 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 312 cl::desc("Count the induction variable only once when interleaving")); 313 314 static cl::opt<bool> EnableCondStoresVectorization( 315 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 316 cl::desc("Enable if predication of stores during vectorization.")); 317 318 static cl::opt<unsigned> MaxNestedScalarReductionIC( 319 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 320 cl::desc("The maximum interleave count to use when interleaving a scalar " 321 "reduction in a nested loop.")); 322 323 static cl::opt<bool> 324 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 325 cl::Hidden, 326 cl::desc("Prefer in-loop vector reductions, " 327 "overriding the targets preference.")); 328 329 static cl::opt<bool> PreferPredicatedReductionSelect( 330 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 331 cl::desc( 332 "Prefer predicating a reduction operation over an after loop select.")); 333 334 cl::opt<bool> EnableVPlanNativePath( 335 "enable-vplan-native-path", cl::init(false), cl::Hidden, 336 cl::desc("Enable VPlan-native vectorization path with " 337 "support for outer loop vectorization.")); 338 339 // FIXME: Remove this switch once we have divergence analysis. Currently we 340 // assume divergent non-backedge branches when this switch is true. 341 cl::opt<bool> EnableVPlanPredication( 342 "enable-vplan-predication", cl::init(false), cl::Hidden, 343 cl::desc("Enable VPlan-native vectorization path predicator with " 344 "support for outer loop vectorization.")); 345 346 // This flag enables the stress testing of the VPlan H-CFG construction in the 347 // VPlan-native vectorization path. 
static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
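// For example (an illustrative walk-through with a typical data layout):
// i1 has an alloc size of 1 byte, so four i1 array elements occupy 4 bytes,
// while <4 x i1> has a store size of just 1 byte; the sizes differ, so
// hasIrregularType(i1, DL, 4) is true. For i32, 4 * 4 bytes matches the
// 16-byte store size of <4 x i32>, so i32 is regular at VF = 4.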
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns the "best known" trip count for the specified loop \p L as defined
/// by the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();
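  // An illustrative sketch of the skeleton in the simple (non-epilogue)
  // case; the block names follow this pass's conventions, and the exact set
  // of check blocks depends on the loop:
  //
  //   minimum-iteration-count check --> bypass to scalar loop if too small
  //   SCEV predicate checks         --> bypass to scalar loop on failure
  //   memory runtime checks         --> bypass to scalar loop on failure
  //   vector.ph                     --> vector loop preheader (returned)
  //   vector.body                   --> the widened loop
  //   middle.block                  --> decide: exit, or run the remainder
  //   scalar.ph / original loop     --> scalar epilogue for the leftover
  //                                     iterations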
  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and
  /// more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPValue *StartV, VPValue *Def,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop-invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);
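  // For example (values are illustrative): with VF = 4, StartIdx = 0 and a
  // loop-invariant step %s, getStepVector applied to a broadcast of %x
  // produces the vector
  //   <%x + 0*%s, %x + 1*%s, %x + 2*%s, %x + 3*%s>.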
  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and
  /// createVectorIntOrFpInductionPHI()). In the latter case \p EntryVal is a
  /// TruncInst and we must not record anything for that IV, but it's
  /// error-prone to expect callers of this routine to care about that, hence
  /// this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);
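  // For example (illustrative, VF = 4): reverseVector(<a, b, c, d>) emits a
  // shufflevector with mask <3, 2, 1, 0>, yielding <d, c, b, a>. This is
  // used for consecutive accesses with stride -1.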
  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;
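  // For example (numbers are illustrative): with TripCount = 100, VF = 8 and
  // UF = 2, the widened loop covers 100 - (100 % 16) = 96 iterations of the
  // original loop, and the remaining 4 run in the scalar epilogue.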
  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile-guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};
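// Usage sketch (the factors are illustrative, not a recommendation):
//
//   EpilogueLoopVectorizationInfo EPI(/*MVF=*/16, /*MUF=*/2,
//                                     /*EVF=*/8, /*EUF=*/1);
//
// drives the two-pass scheme below: the main-loop pass vectorizes with
// VF = 16 and records the shared skeleton blocks in EPI, and the epilogue
// pass then reuses them to emit the VF = 8 epilogue loop.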
/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationLegality *LVL,
                                 llvm::LoopVectorizationCostModel *CM,
                                 BlockFrequencyInfo *BFI,
                                 ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm
/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
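// For example (values are illustrative): with Step = i64 2 and a fixed
// VF = 4, createStepForVF folds to the constant i64 8; with a scalable
// VF = <vscale x 4>, it instead emits "vscale * 8", resolved at runtime.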
namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints for how the scalar epilogue loop should
// be lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail-fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};
/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdown due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };
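  // For example (a hypothetical loop, purely for illustration): a loop that
  // keeps one loop-invariant broadcast live across iterations and has at most
  // two vector values simultaneously live inside the body at VF = 4 might
  // report, for the target's vector register ClassID, LoopInvariantRegs = 1
  // and MaxLocalUsers = 2.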
  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }
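  // For example (illustrative): if an i32 add in the loop is only ever used
  // by a store of i8 values, getMinimalBitwidths() may map it to 8, and at
  // VF = 4 its vector form can then be performed on <4 x i8> rather than
  // <4 x i32>.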
1347 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1348 InstructionCost Cost) { 1349 assert(VF.isVector() && "Expected VF >=2"); 1350 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1351 } 1352 1353 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1354 /// interleaving group \p Grp and vector width \p VF. 1355 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1356 ElementCount VF, InstWidening W, 1357 InstructionCost Cost) { 1358 assert(VF.isVector() && "Expected VF >=2"); 1359 // Broadcast this decision to all instructions inside the group, 1360 // but assign the cost to one instruction only. 1361 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1362 if (auto *I = Grp->getMember(i)) { 1363 if (Grp->getInsertPos() == I) 1364 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1365 else 1366 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1367 } 1368 } 1369 } 1370 1371 /// Return the cost model decision for the given instruction \p I and vector 1372 /// width \p VF. Return CM_Unknown if this instruction did not pass 1373 /// through the cost modeling. 1374 InstWidening getWideningDecision(Instruction *I, ElementCount VF) { 1375 assert(VF.isVector() && "Expected VF to be a vector VF"); 1376 // Cost model is not run in the VPlan-native path - return conservative 1377 // result until this changes. 1378 if (EnableVPlanNativePath) 1379 return CM_GatherScatter; 1380 1381 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1382 auto Itr = WideningDecisions.find(InstOnVF); 1383 if (Itr == WideningDecisions.end()) 1384 return CM_Unknown; 1385 return Itr->second.first; 1386 } 1387 1388 /// Return the vectorization cost for the given instruction \p I and vector 1389 /// width \p VF. 1390 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1391 assert(VF.isVector() && "Expected VF >=2"); 1392 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1393 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1394 "The cost is not calculated"); 1395 return WideningDecisions[InstOnVF].second; 1396 } 1397 1398 /// Return true if instruction \p I is an optimizable truncate whose operand 1399 /// is an induction variable. Such a truncate will be removed by adding a new 1400 /// induction variable with the destination type. 1401 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1402 // If the instruction is not a truncate, return false. 1403 auto *Trunc = dyn_cast<TruncInst>(I); 1404 if (!Trunc) 1405 return false; 1406 1407 // Get the source and destination types of the truncate. 1408 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1409 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1410 1411 // If the truncate is free for the given types, return false. Replacing a 1412 // free truncate with an induction variable would add an induction variable 1413 // update instruction to each iteration of the loop. We exclude from this 1414 // check the primary induction variable since it will need an update 1415 // instruction regardless. 1416 Value *Op = Trunc->getOperand(0); 1417 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1418 return false; 1419 1420 // If the truncated value is not an induction variable, return false.
1421 return Legal->isInductionPhi(Op); 1422 } 1423 1424 /// Collects the instructions to scalarize for each predicated instruction in 1425 /// the loop. 1426 void collectInstsToScalarize(ElementCount VF); 1427 1428 /// Collect Uniform and Scalar values for the given \p VF. 1429 /// The sets depend on the CM decision for Load/Store instructions 1430 /// that may be vectorized as interleaved, gather-scatter or scalarized. 1431 void collectUniformsAndScalars(ElementCount VF) { 1432 // Do the analysis once. 1433 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1434 return; 1435 setCostBasedWideningDecision(VF); 1436 collectLoopUniforms(VF); 1437 collectLoopScalars(VF); 1438 } 1439 1440 /// Returns true if the target machine supports masked store operation 1441 /// for the given \p DataType and kind of access to \p Ptr. 1442 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) { 1443 return Legal->isConsecutivePtr(Ptr) && 1444 TTI.isLegalMaskedStore(DataType, Alignment); 1445 } 1446 1447 /// Returns true if the target machine supports masked load operation 1448 /// for the given \p DataType and kind of access to \p Ptr. 1449 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) { 1450 return Legal->isConsecutivePtr(Ptr) && 1451 TTI.isLegalMaskedLoad(DataType, Alignment); 1452 } 1453 1454 /// Returns true if the target machine supports masked scatter operation 1455 /// for the given \p DataType. 1456 bool isLegalMaskedScatter(Type *DataType, Align Alignment) { 1457 return TTI.isLegalMaskedScatter(DataType, Alignment); 1458 } 1459 1460 /// Returns true if the target machine supports masked gather operation 1461 /// for the given \p DataType. 1462 bool isLegalMaskedGather(Type *DataType, Align Alignment) { 1463 return TTI.isLegalMaskedGather(DataType, Alignment); 1464 } 1465 1466 /// Returns true if the target machine can represent \p V as a masked gather 1467 /// or scatter operation. 1468 bool isLegalGatherOrScatter(Value *V) { 1469 bool LI = isa<LoadInst>(V); 1470 bool SI = isa<StoreInst>(V); 1471 if (!LI && !SI) 1472 return false; 1473 auto *Ty = getMemInstValueType(V); 1474 Align Align = getLoadStoreAlignment(V); 1475 return (LI && isLegalMaskedGather(Ty, Align)) || 1476 (SI && isLegalMaskedScatter(Ty, Align)); 1477 } 1478 1479 /// Returns true if the target machine supports all of the reduction 1480 /// variables found for the given VF. 1481 bool canVectorizeReductions(ElementCount VF) { 1482 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1483 RecurrenceDescriptor RdxDesc = Reduction.second; 1484 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1485 })); 1486 } 1487 1488 /// Returns true if \p I is an instruction that will be scalarized with 1489 /// predication. Such instructions include conditional stores and 1490 /// instructions that may divide by zero. 1491 /// If a non-zero VF has been calculated, we check if I will be scalarized 1492 /// with predication for that VF. 1493 bool isScalarWithPredication(Instruction *I, 1494 ElementCount VF = ElementCount::getFixed(1)); 1495 1496 // Returns true if \p I is an instruction that will be predicated either 1497 // through scalar predication or masked load/store or masked gather/scatter. 1498 // Superset of instructions that return true for isScalarWithPredication. 1499 bool isPredicatedInst(Instruction *I) { 1500 if (!blockNeedsPredication(I->getParent())) 1501 return false; 1502 // Loads and stores that need some form of masked operation are predicated 1503 // instructions.
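// E.g. (illustrative) a store to A[i] that executes only when Cond[i]
// holds is predicated: it either becomes a masked store (when a mask is
// required) or is scalarized and emitted under a per-lane guard.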
1504 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1505 return Legal->isMaskRequired(I); 1506 return isScalarWithPredication(I); 1507 } 1508 1509 /// Returns true if \p I is a memory instruction with consecutive memory 1510 /// access that can be widened. 1511 bool 1512 memoryInstructionCanBeWidened(Instruction *I, 1513 ElementCount VF = ElementCount::getFixed(1)); 1514 1515 /// Returns true if \p I is a memory instruction in an interleaved-group 1516 /// of memory accesses that can be vectorized with wide vector loads/stores 1517 /// and shuffles. 1518 bool 1519 interleavedAccessCanBeWidened(Instruction *I, 1520 ElementCount VF = ElementCount::getFixed(1)); 1521 1522 /// Check if \p Instr belongs to any interleaved access group. 1523 bool isAccessInterleaved(Instruction *Instr) { 1524 return InterleaveInfo.isInterleaved(Instr); 1525 } 1526 1527 /// Get the interleaved access group that \p Instr belongs to. 1528 const InterleaveGroup<Instruction> * 1529 getInterleavedAccessGroup(Instruction *Instr) { 1530 return InterleaveInfo.getInterleaveGroup(Instr); 1531 } 1532 1533 /// Returns true if we're required to use a scalar epilogue for at least 1534 /// the final iteration of the original loop. 1535 bool requiresScalarEpilogue() const { 1536 if (!isScalarEpilogueAllowed()) 1537 return false; 1538 // If we might exit from anywhere but the latch, we must run the exiting 1539 // iteration in scalar form. 1540 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1541 return true; 1542 return InterleaveInfo.requiresScalarEpilogue(); 1543 } 1544 1545 /// Returns true if a scalar epilogue is allowed, i.e. not prevented by 1546 /// optsize or a loop hint annotation. 1547 bool isScalarEpilogueAllowed() const { 1548 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1549 } 1550 1551 /// Returns true if all loop blocks should be masked to fold the loop tail. 1552 bool foldTailByMasking() const { return FoldTailByMasking; } 1553 1554 bool blockNeedsPredication(BasicBlock *BB) { 1555 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1556 } 1557 1558 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1559 /// nodes to the chain of instructions representing the reductions. Uses a 1560 /// MapVector to ensure deterministic iteration order. 1561 using ReductionChainMap = 1562 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1563 1564 /// Return the chain of instructions representing an in-loop reduction. 1565 const ReductionChainMap &getInLoopReductionChains() const { 1566 return InLoopReductionChains; 1567 } 1568 1569 /// Returns true if the Phi is part of an in-loop reduction. 1570 bool isInLoopReduction(PHINode *Phi) const { 1571 return InLoopReductionChains.count(Phi); 1572 } 1573 1574 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1575 /// with factor VF. Return the cost of the instruction, including 1576 /// scalarization overhead if it's needed. 1577 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF); 1578 1579 /// Estimate cost of a call instruction CI if it were vectorized with factor 1580 /// VF. Return the cost of the instruction, including scalarization overhead 1581 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1582 /// scalarized - 1583 /// i.e. either a vector version isn't available, or it is too expensive. 1584 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1585 bool &NeedToScalarize); 1586 1587 /// Invalidates decisions already taken by the cost model.
1588 void invalidateCostModelingDecisions() { 1589 WideningDecisions.clear(); 1590 Uniforms.clear(); 1591 Scalars.clear(); 1592 } 1593 1594 private: 1595 unsigned NumPredStores = 0; 1596 1597 /// \return An upper bound for the vectorization factor, a power-of-2 larger 1598 /// than zero. One is returned if vectorization should best be avoided due 1599 /// to cost. 1600 ElementCount computeFeasibleMaxVF(unsigned ConstTripCount, 1601 ElementCount UserVF); 1602 1603 /// The vectorization cost is a combination of the cost itself and a boolean 1604 /// indicating whether any of the contributing operations will actually 1605 /// operate on vector values after type legalization in the backend. If this 1606 /// latter value is false, then all operations will be scalarized (i.e. no 1607 /// vectorization has actually taken place). 1610 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1611 1612 /// Returns the expected execution cost. The unit of the cost does 1613 /// not matter because we use the 'cost' units to compare different 1614 /// vector widths. The cost that is returned is *not* normalized by 1615 /// the factor width. 1616 VectorizationCostTy expectedCost(ElementCount VF); 1617 1618 /// Returns the execution time cost of an instruction for a given vector 1619 /// width. A vector width of one means scalar. 1620 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1621 1622 /// The cost-computation logic from getInstructionCost which provides 1623 /// the vector type as an output parameter. 1624 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1625 Type *&VectorTy); 1626 1627 /// Return the cost of instructions in an in-loop reduction pattern, if I is 1628 /// part of that pattern. 1629 InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF, 1630 Type *VectorTy, 1631 TTI::TargetCostKind CostKind); 1632 1633 /// Calculate vectorization cost of memory instruction \p I. 1634 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1635 1636 /// The cost computation for a scalarized memory instruction. 1637 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1638 1639 /// The cost computation for an interleaving group of memory instructions. 1640 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1641 1642 /// The cost computation for a Gather/Scatter instruction. 1643 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1644 1645 /// The cost computation for widening instruction \p I with consecutive 1646 /// memory access. 1647 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1648 1649 /// The cost calculation for a Load/Store instruction \p I with a uniform pointer - 1650 /// Load: scalar load + broadcast. 1651 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1652 /// element). 1653 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1654 1655 /// Estimate the overhead of scalarizing an instruction. This is a 1656 /// convenience wrapper for the type-based getScalarizationOverhead API. 1657 InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF); 1658 1659 /// Returns whether the instruction is a load or store and will be emitted 1660 /// as a vector operation. 1661 bool isConsecutiveLoadOrStore(Instruction *I); 1662 1663 /// Returns true if an artificially high cost for emulated masked memrefs 1664 /// should be used.
1665 bool useEmulatedMaskMemRefHack(Instruction *I); 1666 1667 /// Map of scalar integer values to the smallest bitwidth they can be legally 1668 /// represented as. The vector equivalents of these values should be truncated 1669 /// to this type. 1670 MapVector<Instruction *, uint64_t> MinBWs; 1671 1672 /// A type representing the costs for instructions if they were to be 1673 /// scalarized rather than vectorized. The entries are Instruction-Cost 1674 /// pairs. 1675 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; 1676 1677 /// A set containing all BasicBlocks that are known to be present after 1678 /// vectorization as predicated blocks. 1679 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1680 1681 /// Records whether it is allowed to have the original scalar loop execute at 1682 /// least once. This may be needed as a fallback loop in case runtime 1683 /// aliasing/dependence checks fail, or to handle the tail/remainder 1684 /// iterations when the trip count is unknown or is not a multiple of the VF, 1685 /// or as a peel-loop to handle gaps in interleave-groups. 1686 /// Under optsize and when the trip count is very small we don't allow any 1687 /// iterations to execute in the scalar loop. 1688 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1689 1690 /// All blocks of the loop are to be masked to fold the tail of scalar iterations. 1691 bool FoldTailByMasking = false; 1692 1693 /// A map holding scalar costs for different vectorization factors. The 1694 /// presence of a cost for an instruction in the mapping indicates that the 1695 /// instruction will be scalarized when vectorizing with the associated 1696 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1697 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1698 1699 /// Holds the instructions known to be uniform after vectorization. 1700 /// The data is collected per VF. 1701 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1702 1703 /// Holds the instructions known to be scalar after vectorization. 1704 /// The data is collected per VF. 1705 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1706 1707 /// Holds the instructions (address computations) that are forced to be 1708 /// scalarized. 1709 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1710 1711 /// PHINodes of the reductions that should be expanded in-loop along with 1712 /// their associated chains of reduction operations, in program order from top 1713 /// (PHI) to bottom. 1714 ReductionChainMap InLoopReductionChains; 1715 1716 /// A map of in-loop reduction operations and their immediate chain operand. 1717 /// FIXME: This can be removed once reductions can be costed correctly in 1718 /// vplan. This was added to allow quick lookup of the in-loop operations, 1719 /// without having to loop through InLoopReductionChains. 1720 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1721 1722 /// Returns the expected difference in cost from scalarizing the expression 1723 /// feeding a predicated instruction \p PredInst. The instructions to 1724 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1725 /// non-negative return value implies the expression will be scalarized. 1726 /// Currently, only single-use chains are considered for scalarization.
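/// For example (a sketch): if a predicated udiv is fed by an add whose
/// only user is the udiv, scalarizing the add together with the udiv saves
/// the vector add plus the per-lane extracts it would otherwise need, so
/// the chain can receive a positive discount.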
1727 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1728 ElementCount VF); 1729 1730 /// Collect the instructions that are uniform after vectorization. An 1731 /// instruction is uniform if we represent it with a single scalar value in 1732 /// the vectorized loop corresponding to each vector iteration. Examples of 1733 /// uniform instructions include pointer operands of consecutive or 1734 /// interleaved memory accesses. Note that although uniformity implies an 1735 /// instruction will be scalar, the reverse is not true. In general, a 1736 /// scalarized instruction will be represented by VF scalar values in the 1737 /// vectorized loop, each corresponding to an iteration of the original 1738 /// scalar loop. 1739 void collectLoopUniforms(ElementCount VF); 1740 1741 /// Collect the instructions that are scalar after vectorization. An 1742 /// instruction is scalar if it is known to be uniform or will be scalarized 1743 /// during vectorization. Non-uniform scalarized instructions will be 1744 /// represented by VF values in the vectorized loop, each corresponding to an 1745 /// iteration of the original scalar loop. 1746 void collectLoopScalars(ElementCount VF); 1747 1748 /// Keeps cost model vectorization decision and cost for instructions. 1749 /// Right now it is used for memory instructions only. 1750 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1751 std::pair<InstWidening, InstructionCost>>; 1752 1753 DecisionList WideningDecisions; 1754 1755 /// Returns true if \p V is expected to be vectorized and it needs to be 1756 /// extracted. 1757 bool needsExtract(Value *V, ElementCount VF) const { 1758 Instruction *I = dyn_cast<Instruction>(V); 1759 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1760 TheLoop->isLoopInvariant(I)) 1761 return false; 1762 1763 // Assume we can vectorize V (and hence we need extraction) if the 1764 // scalars are not computed yet. This can happen, because it is called 1765 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1766 // the scalars are collected. That should be a safe assumption in most 1767 // cases, because we check if the operands have vectorizable types 1768 // beforehand in LoopVectorizationLegality. 1769 return Scalars.find(VF) == Scalars.end() || 1770 !isScalarAfterVectorization(I, VF); 1771 }; 1772 1773 /// Returns a range containing only operands needing to be extracted. 1774 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1775 ElementCount VF) { 1776 return SmallVector<Value *, 4>(make_filter_range( 1777 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1778 } 1779 1780 /// Determines if we have the infrastructure to vectorize loop \p L and its 1781 /// epilogue, assuming the main loop is vectorized by \p VF. 1782 bool isCandidateForEpilogueVectorization(const Loop &L, 1783 const ElementCount VF) const; 1784 1785 /// Returns true if epilogue vectorization is considered profitable, and 1786 /// false otherwise. 1787 /// \p VF is the vectorization factor chosen for the original loop. 1788 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1789 1790 public: 1791 /// The loop that we evaluate. 1792 Loop *TheLoop; 1793 1794 /// Predicated scalar evolution analysis. 1795 PredicatedScalarEvolution &PSE; 1796 1797 /// Loop Info analysis. 1798 LoopInfo *LI; 1799 1800 /// Vectorization legality. 1801 LoopVectorizationLegality *Legal; 1802 1803 /// Vector target information. 
1804 const TargetTransformInfo &TTI; 1805 1806 /// Target Library Info. 1807 const TargetLibraryInfo *TLI; 1808 1809 /// Demanded bits analysis. 1810 DemandedBits *DB; 1811 1812 /// Assumption cache. 1813 AssumptionCache *AC; 1814 1815 /// Interface to emit optimization remarks. 1816 OptimizationRemarkEmitter *ORE; 1817 1818 const Function *TheFunction; 1819 1820 /// Loop Vectorize Hints. 1821 const LoopVectorizeHints *Hints; 1822 1823 /// The interleaved access information contains groups of interleaved accesses 1824 /// with the same stride that are close to each other. 1825 InterleavedAccessInfo &InterleaveInfo; 1826 1827 /// Values to ignore in the cost model. 1828 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1829 1830 /// Values to ignore in the cost model when VF > 1. 1831 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1832 1833 /// Profitable vector factors. 1834 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1835 }; 1836 1837 } // end namespace llvm 1838 1839 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1840 // vectorization. The loop needs to be annotated with #pragma omp simd 1841 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the 1842 // vector length information is not provided, vectorization is not considered 1843 // explicit. Interleave hints are not allowed either. These limitations will be 1844 // relaxed in the future. 1845 // Please note that we are currently forced to abuse the pragma 'clang 1846 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1847 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1848 // provides *explicit vectorization hints* (LV can bypass legal checks and 1849 // assume that vectorization is legal). However, both hints are implemented 1850 // using the same metadata (llvm.loop.vectorize, processed by 1851 // LoopVectorizeHints). This will be fixed in the future when the native IR 1852 // representation for pragma 'omp simd' is introduced. 1853 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1854 OptimizationRemarkEmitter *ORE) { 1855 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 1856 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1857 1858 // Only outer loops with an explicit vectorization hint are supported. 1859 // Unannotated outer loops are ignored. 1860 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1861 return false; 1862 1863 Function *Fn = OuterLp->getHeader()->getParent(); 1864 if (!Hints.allowVectorization(Fn, OuterLp, 1865 true /*VectorizeOnlyWhenForced*/)) { 1866 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1867 return false; 1868 } 1869 1870 if (Hints.getInterleave() > 1) { 1871 // TODO: Interleave support is future work. 1872 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1873 "outer loops.\n"); 1874 Hints.emitRemarkWithHints(); 1875 return false; 1876 } 1877 1878 return true; 1879 } 1880 1881 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1882 OptimizationRemarkEmitter *ORE, 1883 SmallVectorImpl<Loop *> &V) { 1884 // Collect inner loops and outer loops without irreducible control flow. For 1885 // now, only collect outer loops that have explicit vectorization hints. If we 1886 // are stress testing the VPlan H-CFG construction, we collect the outermost 1887 // loop of every loop nest.
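// For example (illustrative), when the VPlan-native path is enabled, an
// outer loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// is collected here, and its inner loops are not visited separately.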
1888 if (L.isInnermost() || VPlanBuildStressTest || 1889 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1890 LoopBlocksRPO RPOT(&L); 1891 RPOT.perform(LI); 1892 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1893 V.push_back(&L); 1894 // TODO: Collect inner loops inside marked outer loops in case 1895 // vectorization fails for the outer loop. Do not invoke 1896 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1897 // already known to be reducible. We can use an inherited attribute for 1898 // that. 1899 return; 1900 } 1901 } 1902 for (Loop *InnerL : L) 1903 collectSupportedLoops(*InnerL, LI, ORE, V); 1904 } 1905 1906 namespace { 1907 1908 /// The LoopVectorize Pass. 1909 struct LoopVectorize : public FunctionPass { 1910 /// Pass identification, replacement for typeid 1911 static char ID; 1912 1913 LoopVectorizePass Impl; 1914 1915 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1916 bool VectorizeOnlyWhenForced = false) 1917 : FunctionPass(ID), 1918 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1919 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1920 } 1921 1922 bool runOnFunction(Function &F) override { 1923 if (skipFunction(F)) 1924 return false; 1925 1926 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1927 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1928 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1929 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1930 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1931 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1932 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1933 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1934 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1935 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1936 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1937 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1938 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1939 1940 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1941 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1942 1943 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1944 GetLAA, *ORE, PSI).MadeAnyChange; 1945 } 1946 1947 void getAnalysisUsage(AnalysisUsage &AU) const override { 1948 AU.addRequired<AssumptionCacheTracker>(); 1949 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1950 AU.addRequired<DominatorTreeWrapperPass>(); 1951 AU.addRequired<LoopInfoWrapperPass>(); 1952 AU.addRequired<ScalarEvolutionWrapperPass>(); 1953 AU.addRequired<TargetTransformInfoWrapperPass>(); 1954 AU.addRequired<AAResultsWrapperPass>(); 1955 AU.addRequired<LoopAccessLegacyAnalysis>(); 1956 AU.addRequired<DemandedBitsWrapperPass>(); 1957 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1958 AU.addRequired<InjectTLIMappingsLegacy>(); 1959 1960 // We currently do not preserve loopinfo/dominator analyses with outer loop 1961 // vectorization. Until this is addressed, mark these analyses as preserved 1962 // only for non-VPlan-native path. 1963 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1964 if (!EnableVPlanNativePath) { 1965 AU.addPreserved<LoopInfoWrapperPass>(); 1966 AU.addPreserved<DominatorTreeWrapperPass>(); 1967 } 1968 1969 AU.addPreserved<BasicAAWrapperPass>(); 1970 AU.addPreserved<GlobalsAAWrapperPass>(); 1971 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1972 } 1973 }; 1974 1975 } // end anonymous namespace 1976 1977 //===----------------------------------------------------------------------===// 1978 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer, 1979 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1980 //===----------------------------------------------------------------------===// 1981 1982 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1983 // We need to place the broadcast of invariant variables outside the loop, 1984 // but only if it's proven safe to do so. Otherwise, the broadcast will be 1985 // inside the vector loop body. 1986 Instruction *Instr = dyn_cast<Instruction>(V); 1987 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1988 (!Instr || 1989 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1990 // Place the code for broadcasting invariant variables in the new preheader. 1991 IRBuilder<>::InsertPointGuard Guard(Builder); 1992 if (SafeToHoist) 1993 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1994 1995 // Broadcast the scalar into all locations in the vector. 1996 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1997 1998 return Shuf; 1999 } 2000 2001 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2002 const InductionDescriptor &II, Value *Step, Value *Start, 2003 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2004 VPTransformState &State) { 2005 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2006 "Expected either an induction phi-node or a truncate of it!"); 2007 2008 // Construct the initial value of the vector IV in the vector loop preheader. 2009 auto CurrIP = Builder.saveIP(); 2010 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2011 if (isa<TruncInst>(EntryVal)) { 2012 assert(Start->getType()->isIntegerTy() && 2013 "Truncation requires an integer type"); 2014 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2015 Step = Builder.CreateTrunc(Step, TruncType); 2016 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2017 } 2018 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2019 Value *SteppedStart = 2020 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2021 2022 // We create vector phi nodes for both integer and floating-point induction 2023 // variables. Here, we determine the kind of arithmetic we will perform. 2024 Instruction::BinaryOps AddOp; 2025 Instruction::BinaryOps MulOp; 2026 if (Step->getType()->isIntegerTy()) { 2027 AddOp = Instruction::Add; 2028 MulOp = Instruction::Mul; 2029 } else { 2030 AddOp = II.getInductionOpcode(); 2031 MulOp = Instruction::FMul; 2032 } 2033 2034 // Multiply the vectorization factor by the step using integer or 2035 // floating-point arithmetic as appropriate. 2036 Value *ConstVF = 2037 getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue()); 2038 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2039 2040 // Create a vector splat to use in the induction update. 2041 // 2042 // FIXME: If the step is non-constant, we create the vector splat with 2043 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2044 // handle a constant vector splat.
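// E.g. (illustrative) for an integer IV starting at 0 with Step 1, VF = 4
// and UF = 2: part 0 holds <0, 1, 2, 3>, and one step.add of
// SplatVF = <4, 4, 4, 4> yields <4, 5, 6, 7> for part 1.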
2045 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2046 Value *SplatVF = isa<Constant>(Mul) 2047 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2048 : Builder.CreateVectorSplat(VF, Mul); 2049 Builder.restoreIP(CurrIP); 2050 2051 // We may need to add the step a number of times, depending on the unroll 2052 // factor. The last of those goes into the PHI. 2053 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2054 &*LoopVectorBody->getFirstInsertionPt()); 2055 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2056 Instruction *LastInduction = VecInd; 2057 for (unsigned Part = 0; Part < UF; ++Part) { 2058 State.set(Def, LastInduction, Part); 2059 2060 if (isa<TruncInst>(EntryVal)) 2061 addMetadata(LastInduction, EntryVal); 2062 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2063 State, Part); 2064 2065 LastInduction = cast<Instruction>(addFastMathFlag( 2066 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 2067 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2068 } 2069 2070 // Move the last step to the end of the latch block. This ensures consistent 2071 // placement of all induction updates. 2072 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2073 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2074 auto *ICmp = cast<Instruction>(Br->getCondition()); 2075 LastInduction->moveBefore(ICmp); 2076 LastInduction->setName("vec.ind.next"); 2077 2078 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2079 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2080 } 2081 2082 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2083 return Cost->isScalarAfterVectorization(I, VF) || 2084 Cost->isProfitableToScalarize(I, VF); 2085 } 2086 2087 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2088 if (shouldScalarizeInstruction(IV)) 2089 return true; 2090 auto isScalarInst = [&](User *U) -> bool { 2091 auto *I = cast<Instruction>(U); 2092 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2093 }; 2094 return llvm::any_of(IV->users(), isScalarInst); 2095 } 2096 2097 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2098 const InductionDescriptor &ID, const Instruction *EntryVal, 2099 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2100 unsigned Part, unsigned Lane) { 2101 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2102 "Expected either an induction phi-node or a truncate of it!"); 2103 2104 // This induction variable is not the phi from the original loop but the 2105 // newly-created IV based on the proof that the casted Phi is equal to the 2106 // uncasted Phi in the vectorized loop (possibly under a runtime guard). It 2107 // re-uses the same InductionDescriptor that the original IV uses, but we 2108 // don't have to do any recording in this case - that is done when the 2109 // original IV is processed. 2110 if (isa<TruncInst>(EntryVal)) 2111 return; 2112 2113 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2114 if (Casts.empty()) 2115 return; 2116 // Only the first Cast instruction in the Casts vector is of interest. 2117 // The rest of the Casts (if they exist) have no uses outside the 2118 // induction update chain itself.
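// E.g. (illustrative):
//   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//   %cast = sext i32 %iv to i64 ; only this first cast is mapped here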
2119 if (Lane < UINT_MAX) 2120 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2121 else 2122 State.set(CastDef, VectorLoopVal, Part); 2123 } 2124 2125 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2126 TruncInst *Trunc, VPValue *Def, 2127 VPValue *CastDef, 2128 VPTransformState &State) { 2129 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2130 "Primary induction variable must have an integer type"); 2131 2132 auto II = Legal->getInductionVars().find(IV); 2133 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2134 2135 auto ID = II->second; 2136 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2137 2138 // The value from the original loop to which we are mapping the new induction 2139 // variable. 2140 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2141 2142 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2143 2144 // Generate code for the induction step. Note that induction steps are 2145 // required to be loop-invariant 2146 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2147 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2148 "Induction step should be loop invariant"); 2149 if (PSE.getSE()->isSCEVable(IV->getType())) { 2150 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2151 return Exp.expandCodeFor(Step, Step->getType(), 2152 LoopVectorPreHeader->getTerminator()); 2153 } 2154 return cast<SCEVUnknown>(Step)->getValue(); 2155 }; 2156 2157 // The scalar value to broadcast. This is derived from the canonical 2158 // induction variable. If a truncation type is given, truncate the canonical 2159 // induction variable and step. Otherwise, derive these values from the 2160 // induction descriptor. 2161 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2162 Value *ScalarIV = Induction; 2163 if (IV != OldInduction) { 2164 ScalarIV = IV->getType()->isIntegerTy() 2165 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2166 : Builder.CreateCast(Instruction::SIToFP, Induction, 2167 IV->getType()); 2168 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2169 ScalarIV->setName("offset.idx"); 2170 } 2171 if (Trunc) { 2172 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2173 assert(Step->getType()->isIntegerTy() && 2174 "Truncation requires an integer step"); 2175 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2176 Step = Builder.CreateTrunc(Step, TruncType); 2177 } 2178 return ScalarIV; 2179 }; 2180 2181 // Create the vector values from the scalar IV, in the absence of creating a 2182 // vector IV. 2183 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2184 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2185 for (unsigned Part = 0; Part < UF; ++Part) { 2186 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2187 Value *EntryPart = 2188 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2189 ID.getInductionOpcode()); 2190 State.set(Def, EntryPart, Part); 2191 if (Trunc) 2192 addMetadata(EntryPart, Trunc); 2193 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2194 State, Part); 2195 } 2196 }; 2197 2198 // Now do the actual transformations, and start with creating the step value. 
2199 Value *Step = CreateStepValue(ID.getStep()); 2200 if (VF.isZero() || VF.isScalar()) { 2201 Value *ScalarIV = CreateScalarIV(Step); 2202 CreateSplatIV(ScalarIV, Step); 2203 return; 2204 } 2205 2206 // Determine if we want a scalar version of the induction variable. This is 2207 // true if the induction variable itself is not widened, or if it has at 2208 // least one user in the loop that is not widened. 2209 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2210 if (!NeedsScalarIV) { 2211 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2212 State); 2213 return; 2214 } 2215 2216 // Try to create a new independent vector induction variable. If we can't 2217 // create the phi node, we will splat the scalar induction variable in each 2218 // loop iteration. 2219 if (!shouldScalarizeInstruction(EntryVal)) { 2220 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2221 State); 2222 Value *ScalarIV = CreateScalarIV(Step); 2223 // Create scalar steps that can be used by instructions we will later 2224 // scalarize. Note that the addition of the scalar steps will not increase 2225 // the number of instructions in the loop in the common case prior to 2226 // InstCombine. We will be trading one vector extract for each scalar step. 2227 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2228 return; 2229 } 2230 2231 // All IV users are scalar instructions, so only emit a scalar IV, not a 2232 // vectorized IV, except when we tail-fold: then the splat IV feeds the 2233 // predicate used by the masked loads/stores. 2234 Value *ScalarIV = CreateScalarIV(Step); 2235 if (!Cost->isScalarEpilogueAllowed()) 2236 CreateSplatIV(ScalarIV, Step); 2237 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2238 } 2239 2240 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2241 Instruction::BinaryOps BinOp) { 2242 // Create and check the types. 2243 auto *ValVTy = cast<FixedVectorType>(Val->getType()); 2244 int VLen = ValVTy->getNumElements(); 2245 2246 Type *STy = Val->getType()->getScalarType(); 2247 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2248 "Induction Step must be an integer or FP"); 2249 assert(Step->getType() == STy && "Step has wrong type"); 2250 2251 SmallVector<Constant *, 8> Indices; 2252 2253 if (STy->isIntegerTy()) { 2254 // Create a vector of consecutive numbers starting at StartIdx. 2255 for (int i = 0; i < VLen; ++i) 2256 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2257 2258 // Add the consecutive indices to the vector value. 2259 Constant *Cv = ConstantVector::get(Indices); 2260 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2261 Step = Builder.CreateVectorSplat(VLen, Step); 2262 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2263 // FIXME: The newly created binary instructions should contain nsw/nuw 2264 // flags, which can be taken from the original scalar operations. 2265 Step = Builder.CreateMul(Cv, Step); 2266 return Builder.CreateAdd(Val, Step, "induction"); 2267 } 2268 2269 // Floating point induction. 2270 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2271 "Binary Opcode should be specified for FP induction"); 2272 // Create a vector of consecutive numbers starting at StartIdx. 2273 for (int i = 0; i < VLen; ++i) 2274 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2275 2276 // Add the consecutive indices to the vector value.
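// E.g. (illustrative) with VLen = 4, StartIdx = 0 and BinOp = FAdd this
// computes, per lane: Val + <0.0, 1.0, 2.0, 3.0> * Step.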
2277 Constant *Cv = ConstantVector::get(Indices); 2278 2279 Step = Builder.CreateVectorSplat(VLen, Step); 2280 2281 // Floating point operations had to be 'fast' to enable the induction. 2282 FastMathFlags Flags; 2283 Flags.setFast(); 2284 2285 Value *MulOp = Builder.CreateFMul(Cv, Step); 2286 if (isa<Instruction>(MulOp)) 2287 // Have to check: MulOp may be a constant. 2288 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2289 2290 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2291 if (isa<Instruction>(BOp)) 2292 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2293 return BOp; 2294 } 2295 2296 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2297 Instruction *EntryVal, 2298 const InductionDescriptor &ID, 2299 VPValue *Def, VPValue *CastDef, 2300 VPTransformState &State) { 2301 // We shouldn't have to build scalar steps if we aren't vectorizing. 2302 assert(VF.isVector() && "VF should be greater than one"); 2303 // Get the value type and ensure it and the step have the same type. 2304 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2305 assert(ScalarIVTy == Step->getType() && 2306 "Val and Step should have the same type"); 2307 2308 // We build scalar steps for both integer and floating-point induction 2309 // variables. Here, we determine the kind of arithmetic we will perform. 2310 Instruction::BinaryOps AddOp; 2311 Instruction::BinaryOps MulOp; 2312 if (ScalarIVTy->isIntegerTy()) { 2313 AddOp = Instruction::Add; 2314 MulOp = Instruction::Mul; 2315 } else { 2316 AddOp = ID.getInductionOpcode(); 2317 MulOp = Instruction::FMul; 2318 } 2319 2320 // Determine the number of scalars we need to generate for each unroll 2321 // iteration. If EntryVal is uniform, we only need to generate the first 2322 // lane. Otherwise, we generate all VF values. 2323 unsigned Lanes = 2324 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) 2325 ? 1 2326 : VF.getKnownMinValue(); 2327 assert((!VF.isScalable() || Lanes == 1) && 2328 "Should never scalarize a scalable vector"); 2329 // Compute the scalar steps and save the results in State. 2330 for (unsigned Part = 0; Part < UF; ++Part) { 2331 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2332 auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2333 ScalarIVTy->getScalarSizeInBits()); 2334 Value *StartIdx = 2335 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2336 if (ScalarIVTy->isFloatingPointTy()) 2337 StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy); 2338 StartIdx = addFastMathFlag(Builder.CreateBinOp( 2339 AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane))); 2340 // The step returned by `createStepForVF` is a runtime-evaluated value 2341 // when VF is scalable. Otherwise, it should be folded into a Constant.
2342 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2343 "Expected StartIdx to be folded to a constant when VF is not " 2344 "scalable"); 2345 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2346 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2347 State.set(Def, Add, VPIteration(Part, Lane)); 2348 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2349 Part, Lane); 2350 } 2351 } 2352 } 2353 2354 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2355 const VPIteration &Instance, 2356 VPTransformState &State) { 2357 Value *ScalarInst = State.get(Def, Instance); 2358 Value *VectorValue = State.get(Def, Instance.Part); 2359 VectorValue = Builder.CreateInsertElement( 2360 VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane)); 2361 State.set(Def, VectorValue, Instance.Part); 2362 } 2363 2364 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2365 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2366 assert(!VF.isScalable() && "Cannot reverse scalable vectors"); 2367 SmallVector<int, 8> ShuffleMask; 2368 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 2369 ShuffleMask.push_back(VF.getKnownMinValue() - i - 1); 2370 2371 return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse"); 2372 } 2373 2374 // Return whether we allow using masked interleave-groups (for dealing with 2375 // strided loads/stores that reside in predicated blocks, or for dealing 2376 // with gaps). 2377 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2378 // If an override option has been passed in for interleaved accesses, use it. 2379 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2380 return EnableMaskedInterleavedMemAccesses; 2381 2382 return TTI.enableMaskedInterleavedAccessVectorization(); 2383 } 2384 2385 // Try to vectorize the interleave group that \p Instr belongs to. 2386 // 2387 // E.g. Translate following interleaved load group (factor = 3): 2388 // for (i = 0; i < N; i+=3) { 2389 // R = Pic[i]; // Member of index 0 2390 // G = Pic[i+1]; // Member of index 1 2391 // B = Pic[i+2]; // Member of index 2 2392 // ... // do something to R, G, B 2393 // } 2394 // To: 2395 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2396 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2397 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2398 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2399 // 2400 // Or translate following interleaved store group (factor = 3): 2401 // for (i = 0; i < N; i+=3) { 2402 // ... 
do something to R, G, B 2403 // Pic[i] = R; // Member of index 0 2404 // Pic[i+1] = G; // Member of index 1 2405 // Pic[i+2] = B; // Member of index 2 2406 // } 2407 // To: 2408 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2409 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2410 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2411 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2412 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2413 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2414 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2415 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2416 VPValue *BlockInMask) { 2417 Instruction *Instr = Group->getInsertPos(); 2418 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2419 2420 // Prepare for the vector type of the interleaved load/store. 2421 Type *ScalarTy = getMemInstValueType(Instr); 2422 unsigned InterleaveFactor = Group->getFactor(); 2423 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2424 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2425 2426 // Prepare for the new pointers. 2427 SmallVector<Value *, 2> AddrParts; 2428 unsigned Index = Group->getIndex(Instr); 2429 2430 // TODO: extend the masked interleaved-group support to reversed access. 2431 assert((!BlockInMask || !Group->isReverse()) && 2432 "Reversed masked interleave-group not supported."); 2433 2434 // If the group is reverse, adjust the index to refer to the last vector lane 2435 // instead of the first. We adjust the index from the first vector lane, 2436 // rather than directly getting the pointer for lane VF - 1, because the 2437 // pointer operand of the interleaved access is supposed to be uniform. For 2438 // uniform instructions, we're only required to generate a value for the 2439 // first vector lane in each unroll iteration. 2440 assert(!VF.isScalable() && 2441 "scalable vector reverse operation is not implemented"); 2442 if (Group->isReverse()) 2443 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2444 2445 for (unsigned Part = 0; Part < UF; Part++) { 2446 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2447 setDebugLocFromInst(Builder, AddrPart); 2448 2449 // Note that the current instruction could be at any index. We need to 2450 // adjust the address to the member of index 0. 2451 // 2452 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2453 // b = A[i]; // Member of index 0 2454 // The current pointer points to A[i+1]; adjust it to A[i]. 2455 // 2456 // E.g. A[i+1] = a; // Member of index 1 2457 // A[i] = b; // Member of index 0 2458 // A[i+2] = c; // Member of index 2 (Current instruction) 2459 // The current pointer points to A[i+2]; adjust it to A[i]. 2460 2461 bool InBounds = false; 2462 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2463 InBounds = gep->isInBounds(); 2464 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2465 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2466 2467 // Cast to the vector pointer type.
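// E.g. (illustrative) for an i32 group with factor 3 and VF = 4:
//   %addr.vec = bitcast i32* %member0.addr to <12 x i32>*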
2468 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2469 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2470 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2471 } 2472 2473 setDebugLocFromInst(Builder, Instr); 2474 Value *PoisonVec = PoisonValue::get(VecTy); 2475 2476 Value *MaskForGaps = nullptr; 2477 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2478 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2479 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2480 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2481 } 2482 2483 // Vectorize the interleaved load group. 2484 if (isa<LoadInst>(Instr)) { 2485 // For each unroll part, create a wide load for the group. 2486 SmallVector<Value *, 2> NewLoads; 2487 for (unsigned Part = 0; Part < UF; Part++) { 2488 Instruction *NewLoad; 2489 if (BlockInMask || MaskForGaps) { 2490 assert(useMaskedInterleavedAccesses(*TTI) && 2491 "masked interleaved groups are not allowed."); 2492 Value *GroupMask = MaskForGaps; 2493 if (BlockInMask) { 2494 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2495 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2496 Value *ShuffledMask = Builder.CreateShuffleVector( 2497 BlockInMaskPart, 2498 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2499 "interleaved.mask"); 2500 GroupMask = MaskForGaps 2501 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2502 MaskForGaps) 2503 : ShuffledMask; 2504 } 2505 NewLoad = 2506 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2507 GroupMask, PoisonVec, "wide.masked.vec"); 2508 } 2509 else 2510 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2511 Group->getAlign(), "wide.vec"); 2512 Group->addMetadata(NewLoad); 2513 NewLoads.push_back(NewLoad); 2514 } 2515 2516 // For each member in the group, shuffle out the appropriate data from the 2517 // wide loads. 2518 unsigned J = 0; 2519 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2520 Instruction *Member = Group->getMember(I); 2521 2522 // Skip the gaps in the group. 2523 if (!Member) 2524 continue; 2525 2526 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2527 auto StrideMask = 2528 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2529 for (unsigned Part = 0; Part < UF; Part++) { 2530 Value *StridedVec = Builder.CreateShuffleVector( 2531 NewLoads[Part], StrideMask, "strided.vec"); 2532 2533 // If this member has a different type, cast the result to it. 2534 if (Member->getType() != ScalarTy) { 2535 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2536 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2537 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2538 } 2539 2540 if (Group->isReverse()) 2541 StridedVec = reverseVector(StridedVec); 2542 2543 State.set(VPDefs[J], StridedVec, Part); 2544 } 2545 ++J; 2546 } 2547 return; 2548 } 2549 2550 // The subvector type for the current instruction. 2551 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2552 auto *SubVT = VectorType::get(ScalarTy, VF); 2553 2554 // Vectorize the interleaved store group. 2555 for (unsigned Part = 0; Part < UF; Part++) { 2556 // Collect the stored vector from each member.
2557 SmallVector<Value *, 4> StoredVecs; 2558 for (unsigned i = 0; i < InterleaveFactor; i++) { 2559 // An interleaved store group doesn't allow gaps, so each index has a member. 2560 assert(Group->getMember(i) && "Failed to get a member from an interleaved store group"); 2561 2562 Value *StoredVec = State.get(StoredValues[i], Part); 2563 2564 if (Group->isReverse()) 2565 StoredVec = reverseVector(StoredVec); 2566 2567 // If this member has a different type, cast it to a unified type. 2568 2569 if (StoredVec->getType() != SubVT) 2570 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2571 2572 StoredVecs.push_back(StoredVec); 2573 } 2574 2575 // Concatenate all vectors into a wide vector. 2576 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2577 2578 // Interleave the elements in the wide vector. 2579 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2580 Value *IVec = Builder.CreateShuffleVector( 2581 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2582 "interleaved.vec"); 2583 2584 Instruction *NewStoreInstr; 2585 if (BlockInMask) { 2586 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2587 Value *ShuffledMask = Builder.CreateShuffleVector( 2588 BlockInMaskPart, 2589 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2590 "interleaved.mask"); 2591 NewStoreInstr = Builder.CreateMaskedStore( 2592 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2593 } 2594 else 2595 NewStoreInstr = 2596 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2597 2598 Group->addMetadata(NewStoreInstr); 2599 } 2600 } 2601 2602 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2603 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2604 VPValue *StoredValue, VPValue *BlockInMask) { 2605 // Attempt to issue a wide load or store. 2606 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2607 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2608 2609 assert((LI || SI) && "Invalid Load/Store instruction"); 2610 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2611 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2612 2613 LoopVectorizationCostModel::InstWidening Decision = 2614 Cost->getWideningDecision(Instr, VF); 2615 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2616 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2617 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2618 "CM decision is not to widen the memory instruction"); 2619 2620 Type *ScalarDataTy = getMemInstValueType(Instr); 2621 2622 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2623 const Align Alignment = getLoadStoreAlignment(Instr); 2624 2625 // Determine if the pointer operand of the access is either consecutive or 2626 // reverse consecutive. 2627 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2628 bool ConsecutiveStride = 2629 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2630 bool CreateGatherScatter = 2631 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2632 2633 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2634 // gather/scatter. Otherwise Decision should have been to Scalarize.
2635 assert((ConsecutiveStride || CreateGatherScatter) && 2636 "The instruction should be scalarized"); 2637 (void)ConsecutiveStride; 2638 2639 VectorParts BlockInMaskParts(UF); 2640 bool isMaskRequired = BlockInMask; 2641 if (isMaskRequired) 2642 for (unsigned Part = 0; Part < UF; ++Part) 2643 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2644 2645 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2646 // Calculate the pointer for the specific unroll-part. 2647 GetElementPtrInst *PartPtr = nullptr; 2648 2649 bool InBounds = false; 2650 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2651 InBounds = gep->isInBounds(); 2652 2653 if (Reverse) { 2654 assert(!VF.isScalable() && 2655 "Reversing vectors is not yet supported for scalable vectors."); 2656 2657 // If the address is consecutive but reversed, then the 2658 // wide store needs to start at the last vector element. 2659 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2660 ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue()))); 2661 PartPtr->setIsInBounds(InBounds); 2662 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2663 ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue()))); 2664 PartPtr->setIsInBounds(InBounds); 2665 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2666 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2667 } else { 2668 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2669 PartPtr = cast<GetElementPtrInst>( 2670 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2671 PartPtr->setIsInBounds(InBounds); 2672 } 2673 2674 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2675 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2676 }; 2677 2678 // Handle Stores: 2679 if (SI) { 2680 setDebugLocFromInst(Builder, SI); 2681 2682 for (unsigned Part = 0; Part < UF; ++Part) { 2683 Instruction *NewSI = nullptr; 2684 Value *StoredVal = State.get(StoredValue, Part); 2685 if (CreateGatherScatter) { 2686 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2687 Value *VectorGep = State.get(Addr, Part); 2688 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2689 MaskPart); 2690 } else { 2691 if (Reverse) { 2692 // If we store to reverse consecutive memory locations, then we need 2693 // to reverse the order of elements in the stored value. 2694 StoredVal = reverseVector(StoredVal); 2695 // We don't want to update the value in the map as it might be used in 2696 // another expression. So don't call resetVectorValue(StoredVal). 2697 } 2698 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2699 if (isMaskRequired) 2700 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2701 BlockInMaskParts[Part]); 2702 else 2703 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2704 } 2705 addMetadata(NewSI, SI); 2706 } 2707 return; 2708 } 2709 2710 // Handle loads. 2711 assert(LI && "Must have a load instruction"); 2712 setDebugLocFromInst(Builder, LI); 2713 for (unsigned Part = 0; Part < UF; ++Part) { 2714 Value *NewLI; 2715 if (CreateGatherScatter) { 2716 Value *MaskPart = isMaskRequired ? 
BlockInMaskParts[Part] : nullptr; 2717 Value *VectorGep = State.get(Addr, Part); 2718 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2719 nullptr, "wide.masked.gather"); 2720 addMetadata(NewLI, LI); 2721 } else { 2722 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2723 if (isMaskRequired) 2724 NewLI = Builder.CreateMaskedLoad( 2725 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), 2726 "wide.masked.load"); 2727 else 2728 NewLI = 2729 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2730 2731 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2732 addMetadata(NewLI, LI); 2733 if (Reverse) 2734 NewLI = reverseVector(NewLI); 2735 } 2736 2737 State.set(Def, NewLI, Part); 2738 } 2739 } 2740 2741 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, 2742 VPUser &User, 2743 const VPIteration &Instance, 2744 bool IfPredicateInstr, 2745 VPTransformState &State) { 2746 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2747 2748 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2749 // the first lane and part. 2750 if (isa<NoAliasScopeDeclInst>(Instr)) 2751 if (!Instance.isFirstIteration()) 2752 return; 2753 2754 setDebugLocFromInst(Builder, Instr); 2755 2756 // Does this instruction return a value ? 2757 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2758 2759 Instruction *Cloned = Instr->clone(); 2760 if (!IsVoidRetTy) 2761 Cloned->setName(Instr->getName() + ".cloned"); 2762 2763 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 2764 Builder.GetInsertPoint()); 2765 // Replace the operands of the cloned instructions with their scalar 2766 // equivalents in the new loop. 2767 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 2768 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 2769 auto InputInstance = Instance; 2770 if (!Operand || !OrigLoop->contains(Operand) || 2771 (Cost->isUniformAfterVectorization(Operand, State.VF))) 2772 InputInstance.Lane = 0; 2773 auto *NewOp = State.get(User.getOperand(op), InputInstance); 2774 Cloned->setOperand(op, NewOp); 2775 } 2776 addNewMetadata(Cloned, Instr); 2777 2778 // Place the cloned scalar in the new loop. 2779 Builder.Insert(Cloned); 2780 2781 State.set(Def, Cloned, Instance); 2782 2783 // If we just cloned a new assumption, add it the assumption cache. 2784 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2785 if (II->getIntrinsicID() == Intrinsic::assume) 2786 AC->registerAssumption(II); 2787 2788 // End if-block. 2789 if (IfPredicateInstr) 2790 PredicatedInstructions.push_back(Cloned); 2791 } 2792 2793 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2794 Value *End, Value *Step, 2795 Instruction *DL) { 2796 BasicBlock *Header = L->getHeader(); 2797 BasicBlock *Latch = L->getLoopLatch(); 2798 // As we're just creating this loop, it's possible no latch exists 2799 // yet. If so, use the header as this will be a single block loop. 2800 if (!Latch) 2801 Latch = Header; 2802 2803 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2804 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2805 setDebugLocFromInst(Builder, OldInst); 2806 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2807 2808 Builder.SetInsertPoint(Latch->getTerminator()); 2809 setDebugLocFromInst(Builder, OldInst); 2810 2811 // Create i+1 and fill the PHINode. 
2812 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2813 Induction->addIncoming(Start, L->getLoopPreheader()); 2814 Induction->addIncoming(Next, Latch); 2815 // Create the compare. 2816 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2817 Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 2818 2819 // Now we have two terminators. Remove the old one from the block. 2820 Latch->getTerminator()->eraseFromParent(); 2821 2822 return Induction; 2823 } 2824 2825 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2826 if (TripCount) 2827 return TripCount; 2828 2829 assert(L && "Create Trip Count for null loop."); 2830 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2831 // Find the loop boundaries. 2832 ScalarEvolution *SE = PSE.getSE(); 2833 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2834 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2835 "Invalid loop count"); 2836 2837 Type *IdxTy = Legal->getWidestInductionType(); 2838 assert(IdxTy && "No type for induction"); 2839 2840 // The exit count might have the type of i64 while the phi is i32. This can 2841 // happen if we have an induction variable that is sign extended before the 2842 // compare. The only way that we get a backedge taken count is that the 2843 // induction variable was signed and as such will not overflow. In such a case 2844 // truncation is legal. 2845 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2846 IdxTy->getPrimitiveSizeInBits()) 2847 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2848 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2849 2850 // Get the total trip count from the count by adding 1. 2851 const SCEV *ExitCount = SE->getAddExpr( 2852 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2853 2854 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2855 2856 // Expand the trip count and place the new instructions in the preheader. 2857 // Notice that the pre-header does not change, only the loop body. 2858 SCEVExpander Exp(*SE, DL, "induction"); 2859 2860 // Count holds the overall loop count (N). 2861 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2862 L->getLoopPreheader()->getTerminator()); 2863 2864 if (TripCount->getType()->isPointerTy()) 2865 TripCount = 2866 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2867 L->getLoopPreheader()->getTerminator()); 2868 2869 return TripCount; 2870 } 2871 2872 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2873 if (VectorTripCount) 2874 return VectorTripCount; 2875 2876 Value *TC = getOrCreateTripCount(L); 2877 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2878 2879 Type *Ty = TC->getType(); 2880 // This is where we can make the step a runtime constant. 2881 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 2882 2883 // If the tail is to be folded by masking, round the number of iterations N 2884 // up to a multiple of Step instead of rounding down. This is done by first 2885 // adding Step-1 and then rounding down. Note that it's ok if this addition 2886 // overflows: the vector induction variable will eventually wrap to zero given 2887 // that it starts at zero and its Step is a power of two; the loop will then 2888 // exit, with the last early-exit vector comparison also producing all-true. 
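// (Illustrative example with made-up values, not tied to any particular
// target: for VF = 4 and UF = 2 the Step below is 8. A trip count N = 10 is
// adjusted to 10 + 7 = 17, n.mod.vf = 17 urem 8 = 1, and the vector trip
// count becomes n.vec = 17 - 1 = 16, i.e. N rounded up to the next multiple
// of Step; the 6 lanes beyond the real trip count are masked off.)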
2889 if (Cost->foldTailByMasking()) { 2890 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2891 "VF*UF must be a power of 2 when folding tail by masking"); 2892 assert(!VF.isScalable() && 2893 "Tail folding not yet supported for scalable vectors"); 2894 TC = Builder.CreateAdd( 2895 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 2896 } 2897 2898 // Now we need to generate the expression for the part of the loop that the 2899 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2900 // iterations are not required for correctness, or N - Step, otherwise. Step 2901 // is equal to the vectorization factor (number of SIMD elements) times the 2902 // unroll factor (number of SIMD instructions). 2903 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2904 2905 // There are two cases where we need to ensure (at least) the last iteration 2906 // runs in the scalar remainder loop. Thus, if the step evenly divides 2907 // the trip count, we set the remainder to be equal to the step. If the step 2908 // does not evenly divide the trip count, no adjustment is necessary since 2909 // there will already be scalar iterations. Note that the minimum iterations 2910 // check ensures that N >= Step. The cases are: 2911 // 1) If there is a non-reversed interleaved group that may speculatively 2912 // access memory out-of-bounds. 2913 // 2) If any instruction may follow a conditionally taken exit. That is, if 2914 // the loop contains multiple exiting blocks, or a single exiting block 2915 // which is not the latch. 2916 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 2917 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2918 R = Builder.CreateSelect(IsZero, Step, R); 2919 } 2920 2921 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2922 2923 return VectorTripCount; 2924 } 2925 2926 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2927 const DataLayout &DL) { 2928 // Verify that V is a vector type with same number of elements as DstVTy. 2929 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2930 unsigned VF = DstFVTy->getNumElements(); 2931 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2932 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2933 Type *SrcElemTy = SrcVecTy->getElementType(); 2934 Type *DstElemTy = DstFVTy->getElementType(); 2935 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2936 "Vector elements must have same size"); 2937 2938 // Do a direct cast if element types are castable. 2939 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2940 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2941 } 2942 // V cannot be directly casted to desired vector type. 2943 // May happen when V is a floating point vector but DstVTy is a vector of 2944 // pointers or vice-versa. Handle this using a two-step bitcast using an 2945 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
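// For example (assuming 32-bit pointers purely for illustration), casting
// <4 x float> to <4 x i8*> is not a single valid bitcast, so it is split as
//   <4 x float> --bitcast--> <4 x i32> --inttoptr--> <4 x i8*>
// with the integer vector acting as the common intermediate form.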
2946 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2947 "Only one type should be a pointer type"); 2948 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2949 "Only one type should be a floating point type"); 2950 Type *IntTy = 2951 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2952 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2953 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2954 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2955 } 2956 2957 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2958 BasicBlock *Bypass) { 2959 Value *Count = getOrCreateTripCount(L); 2960 // Reuse existing vector loop preheader for TC checks. 2961 // Note that new preheader block is generated for vector loop. 2962 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2963 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2964 2965 // Generate code to check if the loop's trip count is less than VF * UF, or 2966 // equal to it in case a scalar epilogue is required; this implies that the 2967 // vector trip count is zero. This check also covers the case where adding one 2968 // to the backedge-taken count overflowed leading to an incorrect trip count 2969 // of zero. In this case we will also jump to the scalar loop. 2970 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2971 : ICmpInst::ICMP_ULT; 2972 2973 // If tail is to be folded, vector loop takes care of all iterations. 2974 Value *CheckMinIters = Builder.getFalse(); 2975 if (!Cost->foldTailByMasking()) { 2976 Value *Step = 2977 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 2978 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 2979 } 2980 // Create new preheader for vector loop. 2981 LoopVectorPreHeader = 2982 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2983 "vector.ph"); 2984 2985 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2986 DT->getNode(Bypass)->getIDom()) && 2987 "TC check is expected to dominate Bypass"); 2988 2989 // Update dominator for Bypass & LoopExit. 2990 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2991 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2992 2993 ReplaceInstWithInst( 2994 TCCheckBlock->getTerminator(), 2995 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2996 LoopBypassBlocks.push_back(TCCheckBlock); 2997 } 2998 2999 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3000 // Reuse existing vector loop preheader for SCEV checks. 3001 // Note that new preheader block is generated for vector loop. 3002 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader; 3003 3004 // Generate the code to check that the SCEV assumptions that we made. 3005 // We want the new basic block to start at the first instruction in a 3006 // sequence of instructions that form a check. 
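// One typical assumption (a sketch of a common case, not an exhaustive
// list) is a symbolic stride that the analysis assumed to equal 1; the
// expanded predicate then boils down to a runtime comparison of the form
// "Stride == 1", and failing it branches to the scalar loop.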
3007 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3008 "scev.check"); 3009 Value *SCEVCheck = Exp.expandCodeForPredicate( 3010 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 3011 3012 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3013 if (C->isZero()) 3014 return; 3015 3016 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3017 (OptForSizeBasedOnProfile && 3018 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3019 "Cannot SCEV check stride or overflow when optimizing for size"); 3020 3021 SCEVCheckBlock->setName("vector.scevcheck"); 3022 // Create new preheader for vector loop. 3023 LoopVectorPreHeader = 3024 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 3025 nullptr, "vector.ph"); 3026 3027 // Update dominator only if this is first RT check. 3028 if (LoopBypassBlocks.empty()) { 3029 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3030 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3031 } 3032 3033 ReplaceInstWithInst( 3034 SCEVCheckBlock->getTerminator(), 3035 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 3036 LoopBypassBlocks.push_back(SCEVCheckBlock); 3037 AddedSafetyChecks = true; 3038 } 3039 3040 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3041 // VPlan-native path does not do any analysis for runtime checks currently. 3042 if (EnableVPlanNativePath) 3043 return; 3044 3045 // Reuse existing vector loop preheader for runtime memory checks. 3046 // Note that new preheader block is generated for vector loop. 3047 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 3048 3049 // Generate the code that checks in runtime if arrays overlap. We put the 3050 // checks into a separate block to make the more common case of few elements 3051 // faster. 3052 auto *LAI = Legal->getLAI(); 3053 const auto &RtPtrChecking = *LAI->getRuntimePointerChecking(); 3054 if (!RtPtrChecking.Need) 3055 return; 3056 3057 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3058 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3059 "Cannot emit memory checks when optimizing for size, unless forced " 3060 "to vectorize."); 3061 ORE->emit([&]() { 3062 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3063 L->getStartLoc(), L->getHeader()) 3064 << "Code-size may be reduced by not forcing " 3065 "vectorization, or by source-code modifications " 3066 "eliminating the need for runtime checks " 3067 "(e.g., adding 'restrict')."; 3068 }); 3069 } 3070 3071 MemCheckBlock->setName("vector.memcheck"); 3072 // Create new preheader for vector loop. 3073 LoopVectorPreHeader = 3074 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 3075 "vector.ph"); 3076 3077 auto *CondBranch = cast<BranchInst>( 3078 Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader)); 3079 ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch); 3080 LoopBypassBlocks.push_back(MemCheckBlock); 3081 AddedSafetyChecks = true; 3082 3083 // Update dominator only if this is first RT check. 
3084 if (LoopBypassBlocks.empty()) { 3085 DT->changeImmediateDominator(Bypass, MemCheckBlock); 3086 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 3087 } 3088 3089 Instruction *FirstCheckInst; 3090 Instruction *MemRuntimeCheck; 3091 SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(), 3092 "induction"); 3093 std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks( 3094 MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp); 3095 assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking " 3096 "claimed checks are required"); 3097 CondBranch->setCondition(MemRuntimeCheck); 3098 3099 // We currently don't use LoopVersioning for the actual loop cloning but we 3100 // still use it to add the noalias metadata. 3101 LVer = std::make_unique<LoopVersioning>( 3102 *Legal->getLAI(), 3103 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3104 DT, PSE.getSE()); 3105 LVer->prepareNoAliasMetadata(); 3106 } 3107 3108 Value *InnerLoopVectorizer::emitTransformedIndex( 3109 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3110 const InductionDescriptor &ID) const { 3111 3112 SCEVExpander Exp(*SE, DL, "induction"); 3113 auto Step = ID.getStep(); 3114 auto StartValue = ID.getStartValue(); 3115 assert(Index->getType() == Step->getType() && 3116 "Index type does not match StepValue type"); 3117 3118 // Note: the IR at this point is broken. We cannot use SE to create any new 3119 // SCEV and then expand it, hoping that SCEV's simplification will give us 3120 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3121 // lead to various SCEV crashes. So all we can do is to use builder and rely 3122 // on InstCombine for future simplifications. Here we handle some trivial 3123 // cases only. 3124 auto CreateAdd = [&B](Value *X, Value *Y) { 3125 assert(X->getType() == Y->getType() && "Types don't match!"); 3126 if (auto *CX = dyn_cast<ConstantInt>(X)) 3127 if (CX->isZero()) 3128 return Y; 3129 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3130 if (CY->isZero()) 3131 return X; 3132 return B.CreateAdd(X, Y); 3133 }; 3134 3135 auto CreateMul = [&B](Value *X, Value *Y) { 3136 assert(X->getType() == Y->getType() && "Types don't match!"); 3137 if (auto *CX = dyn_cast<ConstantInt>(X)) 3138 if (CX->isOne()) 3139 return Y; 3140 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3141 if (CY->isOne()) 3142 return X; 3143 return B.CreateMul(X, Y); 3144 }; 3145 3146 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3147 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3148 // the DomTree is not kept up-to-date for additional blocks generated in the 3149 // vector loop. By using the header as insertion point, we guarantee that the 3150 // expanded instructions dominate all their uses. 
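// Once the insert point is chosen by the helper below, the switch that
// follows computes the transformed index, in shorthand:
//   integer IV: StartValue + Index * Step
//   pointer IV: getelementptr StartValue, Index * Step
//   FP IV:      StartValue fadd/fsub Index * Step
// with the CreateAdd/CreateMul helpers above folding the trivial "+ 0" and
// "* 1" cases.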
3151 auto GetInsertPoint = [this, &B]() { 3152 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3153 if (InsertBB != LoopVectorBody && 3154 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3155 return LoopVectorBody->getTerminator(); 3156 return &*B.GetInsertPoint(); 3157 }; 3158 switch (ID.getKind()) { 3159 case InductionDescriptor::IK_IntInduction: { 3160 assert(Index->getType() == StartValue->getType() && 3161 "Index type does not match StartValue type"); 3162 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3163 return B.CreateSub(StartValue, Index); 3164 auto *Offset = CreateMul( 3165 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3166 return CreateAdd(StartValue, Offset); 3167 } 3168 case InductionDescriptor::IK_PtrInduction: { 3169 assert(isa<SCEVConstant>(Step) && 3170 "Expected constant step for pointer induction"); 3171 return B.CreateGEP( 3172 StartValue->getType()->getPointerElementType(), StartValue, 3173 CreateMul(Index, 3174 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()))); 3175 } 3176 case InductionDescriptor::IK_FpInduction: { 3177 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3178 auto InductionBinOp = ID.getInductionBinOp(); 3179 assert(InductionBinOp && 3180 (InductionBinOp->getOpcode() == Instruction::FAdd || 3181 InductionBinOp->getOpcode() == Instruction::FSub) && 3182 "Original bin op should be defined for FP induction"); 3183 3184 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3185 3186 // Floating point operations had to be 'fast' to enable the induction. 3187 FastMathFlags Flags; 3188 Flags.setFast(); 3189 3190 Value *MulExp = B.CreateFMul(StepValue, Index); 3191 if (isa<Instruction>(MulExp)) 3192 // We have to check, the MulExp may be a constant. 3193 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 3194 3195 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3196 "induction"); 3197 if (isa<Instruction>(BOp)) 3198 cast<Instruction>(BOp)->setFastMathFlags(Flags); 3199 3200 return BOp; 3201 } 3202 case InductionDescriptor::IK_NoInduction: 3203 return nullptr; 3204 } 3205 llvm_unreachable("invalid enum"); 3206 } 3207 3208 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3209 LoopScalarBody = OrigLoop->getHeader(); 3210 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3211 LoopExitBlock = OrigLoop->getUniqueExitBlock(); 3212 assert(LoopExitBlock && "Must have an exit block"); 3213 assert(LoopVectorPreHeader && "Invalid loop structure"); 3214 3215 LoopMiddleBlock = 3216 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3217 LI, nullptr, Twine(Prefix) + "middle.block"); 3218 LoopScalarPreHeader = 3219 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3220 nullptr, Twine(Prefix) + "scalar.ph"); 3221 3222 // Set up branch from middle block to the exit and scalar preheader blocks. 3223 // completeLoopSkeleton will update the condition to use an iteration check, 3224 // if required to decide whether to execute the remainder. 
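// In shorthand, the terminator created here is
//   br i1 true, label %exit.block, label %scalar.ph
// and completeLoopSkeleton may later replace the constant condition with an
// iteration check "%cmp.n = icmp eq %trip.count, %n.vec" when the scalar
// remainder might have to execute.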
3225 BranchInst *BrInst = 3226 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue()); 3227 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3228 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3229 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3230 3231 // We intentionally don't let SplitBlock to update LoopInfo since 3232 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3233 // LoopVectorBody is explicitly added to the correct place few lines later. 3234 LoopVectorBody = 3235 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3236 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3237 3238 // Update dominator for loop exit. 3239 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3240 3241 // Create and register the new vector loop. 3242 Loop *Lp = LI->AllocateLoop(); 3243 Loop *ParentLoop = OrigLoop->getParentLoop(); 3244 3245 // Insert the new loop into the loop nest and register the new basic blocks 3246 // before calling any utilities such as SCEV that require valid LoopInfo. 3247 if (ParentLoop) { 3248 ParentLoop->addChildLoop(Lp); 3249 } else { 3250 LI->addTopLevelLoop(Lp); 3251 } 3252 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3253 return Lp; 3254 } 3255 3256 void InnerLoopVectorizer::createInductionResumeValues( 3257 Loop *L, Value *VectorTripCount, 3258 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3259 assert(VectorTripCount && L && "Expected valid arguments"); 3260 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3261 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3262 "Inconsistent information about additional bypass."); 3263 // We are going to resume the execution of the scalar loop. 3264 // Go over all of the induction variables that we found and fix the 3265 // PHIs that are left in the scalar version of the loop. 3266 // The starting values of PHI nodes depend on the counter of the last 3267 // iteration in the vectorized loop. 3268 // If we come from a bypass edge then we need to start from the original 3269 // start value. 3270 for (auto &InductionEntry : Legal->getInductionVars()) { 3271 PHINode *OrigPhi = InductionEntry.first; 3272 InductionDescriptor II = InductionEntry.second; 3273 3274 // Create phi nodes to merge from the backedge-taken check block. 3275 PHINode *BCResumeVal = 3276 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3277 LoopScalarPreHeader->getTerminator()); 3278 // Copy original phi DL over to the new one. 3279 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3280 Value *&EndValue = IVEndValues[OrigPhi]; 3281 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3282 if (OrigPhi == OldInduction) { 3283 // We know what the end value is. 3284 EndValue = VectorTripCount; 3285 } else { 3286 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3287 Type *StepType = II.getStep()->getType(); 3288 Instruction::CastOps CastOp = 3289 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3290 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3291 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3292 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3293 EndValue->setName("ind.end"); 3294 3295 // Compute the end value for the additional bypass (if applicable). 
3296 if (AdditionalBypass.first) { 3297 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3298 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3299 StepType, true); 3300 CRD = 3301 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3302 EndValueFromAdditionalBypass = 3303 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3304 EndValueFromAdditionalBypass->setName("ind.end"); 3305 } 3306 } 3307 // The new PHI merges the original incoming value, in case of a bypass, 3308 // or the value at the end of the vectorized loop. 3309 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3310 3311 // Fix the scalar body counter (PHI node). 3312 // The old induction's phi node in the scalar body needs the truncated 3313 // value. 3314 for (BasicBlock *BB : LoopBypassBlocks) 3315 BCResumeVal->addIncoming(II.getStartValue(), BB); 3316 3317 if (AdditionalBypass.first) 3318 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3319 EndValueFromAdditionalBypass); 3320 3321 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3322 } 3323 } 3324 3325 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3326 MDNode *OrigLoopID) { 3327 assert(L && "Expected valid loop."); 3328 3329 // The trip counts should be cached by now. 3330 Value *Count = getOrCreateTripCount(L); 3331 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3332 3333 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3334 3335 // Add a check in the middle block to see if we have completed 3336 // all of the iterations in the first vector loop. 3337 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3338 // If tail is to be folded, we know we don't need to run the remainder. 3339 if (!Cost->foldTailByMasking()) { 3340 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3341 Count, VectorTripCount, "cmp.n", 3342 LoopMiddleBlock->getTerminator()); 3343 3344 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3345 // of the corresponding compare because they may have ended up with 3346 // different line numbers and we want to avoid awkward line stepping while 3347 // debugging. Eg. if the compare has got a line number inside the loop. 3348 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3349 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3350 } 3351 3352 // Get ready to start creating new instructions into the vectorized body. 3353 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3354 "Inconsistent vector loop preheader"); 3355 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3356 3357 Optional<MDNode *> VectorizedLoopID = 3358 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3359 LLVMLoopVectorizeFollowupVectorized}); 3360 if (VectorizedLoopID.hasValue()) { 3361 L->setLoopID(VectorizedLoopID.getValue()); 3362 3363 // Do not setAlreadyVectorized if loop attributes have been defined 3364 // explicitly. 3365 return LoopVectorPreHeader; 3366 } 3367 3368 // Keep all loop hints from the original loop on the vector loop (we'll 3369 // replace the vectorizer-specific hints below). 
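// For instance (a sketch of the resulting metadata), after
// Hints.setAlreadyVectorized() below, the vector loop carries
//   !llvm.loop !{..., !{!"llvm.loop.isvectorized", i32 1}}
// which keeps later runs of the vectorizer from transforming it again.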
3370 if (MDNode *LID = OrigLoop->getLoopID()) 3371 L->setLoopID(LID); 3372 3373 LoopVectorizeHints Hints(L, true, *ORE); 3374 Hints.setAlreadyVectorized(); 3375 3376 #ifdef EXPENSIVE_CHECKS 3377 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3378 LI->verify(*DT); 3379 #endif 3380 3381 return LoopVectorPreHeader; 3382 } 3383 3384 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3385 /* 3386 In this function we generate a new loop. The new loop will contain 3387 the vectorized instructions while the old loop will continue to run the 3388 scalar remainder. 3389 3390 [ ] <-- loop iteration number check. 3391 / | 3392 / v 3393 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3394 | / | 3395 | / v 3396 || [ ] <-- vector pre header. 3397 |/ | 3398 | v 3399 | [ ] \ 3400 | [ ]_| <-- vector loop. 3401 | | 3402 | v 3403 | -[ ] <--- middle-block. 3404 | / | 3405 | / v 3406 -|- >[ ] <--- new preheader. 3407 | | 3408 | v 3409 | [ ] \ 3410 | [ ]_| <-- old scalar loop to handle remainder. 3411 \ | 3412 \ v 3413 >[ ] <-- exit block. 3414 ... 3415 */ 3416 3417 // Get the metadata of the original loop before it gets modified. 3418 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3419 3420 // Create an empty vector loop, and prepare basic blocks for the runtime 3421 // checks. 3422 Loop *Lp = createVectorLoopSkeleton(""); 3423 3424 // Now, compare the new count to zero. If it is zero skip the vector loop and 3425 // jump to the scalar loop. This check also covers the case where the 3426 // backedge-taken count is uint##_max: adding one to it will overflow leading 3427 // to an incorrect trip count of zero. In this (rare) case we will also jump 3428 // to the scalar loop. 3429 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3430 3431 // Generate the code to check any assumptions that we've made for SCEV 3432 // expressions. 3433 emitSCEVChecks(Lp, LoopScalarPreHeader); 3434 3435 // Generate the code that checks in runtime if arrays overlap. We put the 3436 // checks into a separate block to make the more common case of few elements 3437 // faster. 3438 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3439 3440 // Some loops have a single integer induction variable, while other loops 3441 // don't. One example is c++ iterators that often have multiple pointer 3442 // induction variables. In the code below we also support a case where we 3443 // don't have a single induction variable. 3444 // 3445 // We try to obtain an induction variable from the original loop as hard 3446 // as possible. However if we don't find one that: 3447 // - is an integer 3448 // - counts from zero, stepping by one 3449 // - is the size of the widest induction variable type 3450 // then we create a new one. 3451 OldInduction = Legal->getPrimaryInduction(); 3452 Type *IdxTy = Legal->getWidestInductionType(); 3453 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3454 // The loop step is equal to the vectorization factor (num of SIMD elements) 3455 // times the unroll factor (num of SIMD instructions). 3456 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3457 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3458 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3459 Induction = 3460 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3461 getDebugLocFromInstOrOperands(OldInduction)); 3462 3463 // Emit phis for the new starting index of the scalar loop. 
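// Each bc.resume.val phi created below merges the vector loop's end value
// (when the scalar preheader is reached from the middle block) with the
// original start value (when arriving via a bypass block), so the scalar
// remainder resumes exactly where the vector loop stopped.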
3464 createInductionResumeValues(Lp, CountRoundDown); 3465 3466 return completeLoopSkeleton(Lp, OrigLoopID); 3467 } 3468 3469 // Fix up external users of the induction variable. At this point, we are 3470 // in LCSSA form, with all external PHIs that use the IV having one input value, 3471 // coming from the remainder loop. We need those PHIs to also have a correct 3472 // value for the IV when arriving directly from the middle block. 3473 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3474 const InductionDescriptor &II, 3475 Value *CountRoundDown, Value *EndValue, 3476 BasicBlock *MiddleBlock) { 3477 // There are two kinds of external IV usages - those that use the value 3478 // computed in the last iteration (the PHI) and those that use the penultimate 3479 // value (the value that feeds into the phi from the loop latch). 3480 // We allow both, but they, obviously, have different values. 3481 3482 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3483 3484 DenseMap<Value *, Value *> MissingVals; 3485 3486 // An external user of the last iteration's value should see the value that 3487 // the remainder loop uses to initialize its own IV. 3488 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3489 for (User *U : PostInc->users()) { 3490 Instruction *UI = cast<Instruction>(U); 3491 if (!OrigLoop->contains(UI)) { 3492 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3493 MissingVals[UI] = EndValue; 3494 } 3495 } 3496 3497 // An external user of the penultimate value need to see EndValue - Step. 3498 // The simplest way to get this is to recompute it from the constituent SCEVs, 3499 // that is Start + (Step * (CRD - 1)). 3500 for (User *U : OrigPhi->users()) { 3501 auto *UI = cast<Instruction>(U); 3502 if (!OrigLoop->contains(UI)) { 3503 const DataLayout &DL = 3504 OrigLoop->getHeader()->getModule()->getDataLayout(); 3505 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3506 3507 IRBuilder<> B(MiddleBlock->getTerminator()); 3508 Value *CountMinusOne = B.CreateSub( 3509 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3510 Value *CMO = 3511 !II.getStep()->getType()->isIntegerTy() 3512 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3513 II.getStep()->getType()) 3514 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3515 CMO->setName("cast.cmo"); 3516 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3517 Escape->setName("ind.escape"); 3518 MissingVals[UI] = Escape; 3519 } 3520 } 3521 3522 for (auto &I : MissingVals) { 3523 PHINode *PHI = cast<PHINode>(I.first); 3524 // One corner case we have to handle is two IVs "chasing" each-other, 3525 // that is %IV2 = phi [...], [ %IV1, %latch ] 3526 // In this case, if IV1 has an external use, we need to avoid adding both 3527 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3528 // don't already have an incoming value for the middle block. 
3529 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3530 PHI->addIncoming(I.second, MiddleBlock); 3531 } 3532 } 3533 3534 namespace { 3535 3536 struct CSEDenseMapInfo { 3537 static bool canHandle(const Instruction *I) { 3538 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3539 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3540 } 3541 3542 static inline Instruction *getEmptyKey() { 3543 return DenseMapInfo<Instruction *>::getEmptyKey(); 3544 } 3545 3546 static inline Instruction *getTombstoneKey() { 3547 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3548 } 3549 3550 static unsigned getHashValue(const Instruction *I) { 3551 assert(canHandle(I) && "Unknown instruction!"); 3552 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3553 I->value_op_end())); 3554 } 3555 3556 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3557 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3558 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3559 return LHS == RHS; 3560 return LHS->isIdenticalTo(RHS); 3561 } 3562 }; 3563 3564 } // end anonymous namespace 3565 3566 ///Perform cse of induction variable instructions. 3567 static void cse(BasicBlock *BB) { 3568 // Perform simple cse. 3569 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3570 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3571 Instruction *In = &*I++; 3572 3573 if (!CSEDenseMapInfo::canHandle(In)) 3574 continue; 3575 3576 // Check if we can replace this instruction with any of the 3577 // visited instructions. 3578 if (Instruction *V = CSEMap.lookup(In)) { 3579 In->replaceAllUsesWith(V); 3580 In->eraseFromParent(); 3581 continue; 3582 } 3583 3584 CSEMap[In] = In; 3585 } 3586 } 3587 3588 InstructionCost 3589 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3590 bool &NeedToScalarize) { 3591 Function *F = CI->getCalledFunction(); 3592 Type *ScalarRetTy = CI->getType(); 3593 SmallVector<Type *, 4> Tys, ScalarTys; 3594 for (auto &ArgOp : CI->arg_operands()) 3595 ScalarTys.push_back(ArgOp->getType()); 3596 3597 // Estimate cost of scalarized vector call. The source operands are assumed 3598 // to be vectors, so we need to extract individual elements from there, 3599 // execute VF scalar calls, and then gather the result into the vector return 3600 // value. 3601 InstructionCost ScalarCallCost = 3602 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3603 if (VF.isScalar()) 3604 return ScalarCallCost; 3605 3606 // Compute corresponding vector type for return value and arguments. 3607 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3608 for (Type *ScalarTy : ScalarTys) 3609 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3610 3611 // Compute costs of unpacking argument values for the scalar calls and 3612 // packing the return values to a vector. 3613 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3614 3615 InstructionCost Cost = 3616 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3617 3618 // If we can't emit a vector call for this function, then the currently found 3619 // cost is the cost we need to return. 3620 NeedToScalarize = true; 3621 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3622 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3623 3624 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3625 return Cost; 3626 3627 // If the corresponding vector cost is cheaper, return its cost. 
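// Worked example with made-up costs: for VF = 4, a scalar call of cost 10
// and a scalarization overhead of 8, the scalarized estimate above is
// 10 * 4 + 8 = 48; if the target provides a vector variant costing, say,
// 20, the comparison below returns 20 and clears NeedToScalarize.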
3628 InstructionCost VectorCallCost = 3629 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3630 if (VectorCallCost < Cost) { 3631 NeedToScalarize = false; 3632 Cost = VectorCallCost; 3633 } 3634 return Cost; 3635 } 3636 3637 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3638 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3639 return Elt; 3640 return VectorType::get(Elt, VF); 3641 } 3642 3643 InstructionCost 3644 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3645 ElementCount VF) { 3646 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3647 assert(ID && "Expected intrinsic call!"); 3648 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3649 FastMathFlags FMF; 3650 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3651 FMF = FPMO->getFastMathFlags(); 3652 3653 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); 3654 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3655 SmallVector<Type *> ParamTys; 3656 std::transform(FTy->param_begin(), FTy->param_end(), 3657 std::back_inserter(ParamTys), 3658 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3659 3660 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3661 dyn_cast<IntrinsicInst>(CI)); 3662 return TTI.getIntrinsicInstrCost(CostAttrs, 3663 TargetTransformInfo::TCK_RecipThroughput); 3664 } 3665 3666 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3667 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3668 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3669 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3670 } 3671 3672 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3673 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3674 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3675 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3676 } 3677 3678 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3679 // For every instruction `I` in MinBWs, truncate the operands, create a 3680 // truncated version of `I` and reextend its result. InstCombine runs 3681 // later and will remove any ext/trunc pairs. 3682 SmallPtrSet<Value *, 4> Erased; 3683 for (const auto &KV : Cost->getMinimalBitwidths()) { 3684 // If the value wasn't vectorized, we must maintain the original scalar 3685 // type. The absence of the value from State indicates that it 3686 // wasn't vectorized. 3687 VPValue *Def = State.Plan->getVPValue(KV.first); 3688 if (!State.hasAnyVectorValue(Def)) 3689 continue; 3690 for (unsigned Part = 0; Part < UF; ++Part) { 3691 Value *I = State.get(Def, Part); 3692 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3693 continue; 3694 Type *OriginalTy = I->getType(); 3695 Type *ScalarTruncatedTy = 3696 IntegerType::get(OriginalTy->getContext(), KV.second); 3697 auto *TruncatedTy = FixedVectorType::get( 3698 ScalarTruncatedTy, 3699 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3700 if (TruncatedTy == OriginalTy) 3701 continue; 3702 3703 IRBuilder<> B(cast<Instruction>(I)); 3704 auto ShrinkOperand = [&](Value *V) -> Value * { 3705 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3706 if (ZI->getSrcTy() == TruncatedTy) 3707 return ZI->getOperand(0); 3708 return B.CreateZExtOrTrunc(V, TruncatedTy); 3709 }; 3710 3711 // The actual instruction modification depends on the instruction type, 3712 // unfortunately. 
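// For example, if MinBWs records that a value only needs 8 bits, an add of
// two <4 x i32> operands is rewritten (in sketch form) as
//   %a8 = trunc <4 x i32> %a to <4 x i8>
//   %b8 = trunc <4 x i32> %b to <4 x i8>
//   %r8 = add <4 x i8> %a8, %b8
//   %r  = zext <4 x i8> %r8 to <4 x i32>
// and InstCombine later removes any ext/trunc pairs this leaves behind.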
3713 Value *NewI = nullptr; 3714 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3715 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3716 ShrinkOperand(BO->getOperand(1))); 3717 3718 // Any wrapping introduced by shrinking this operation shouldn't be 3719 // considered undefined behavior. So, we can't unconditionally copy 3720 // arithmetic wrapping flags to NewI. 3721 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3722 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3723 NewI = 3724 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3725 ShrinkOperand(CI->getOperand(1))); 3726 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3727 NewI = B.CreateSelect(SI->getCondition(), 3728 ShrinkOperand(SI->getTrueValue()), 3729 ShrinkOperand(SI->getFalseValue())); 3730 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3731 switch (CI->getOpcode()) { 3732 default: 3733 llvm_unreachable("Unhandled cast!"); 3734 case Instruction::Trunc: 3735 NewI = ShrinkOperand(CI->getOperand(0)); 3736 break; 3737 case Instruction::SExt: 3738 NewI = B.CreateSExtOrTrunc( 3739 CI->getOperand(0), 3740 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3741 break; 3742 case Instruction::ZExt: 3743 NewI = B.CreateZExtOrTrunc( 3744 CI->getOperand(0), 3745 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3746 break; 3747 } 3748 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3749 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3750 ->getNumElements(); 3751 auto *O0 = B.CreateZExtOrTrunc( 3752 SI->getOperand(0), 3753 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3754 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3755 ->getNumElements(); 3756 auto *O1 = B.CreateZExtOrTrunc( 3757 SI->getOperand(1), 3758 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3759 3760 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3761 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3762 // Don't do anything with the operands, just extend the result. 3763 continue; 3764 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3765 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3766 ->getNumElements(); 3767 auto *O0 = B.CreateZExtOrTrunc( 3768 IE->getOperand(0), 3769 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3770 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3771 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3772 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3773 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3774 ->getNumElements(); 3775 auto *O0 = B.CreateZExtOrTrunc( 3776 EE->getOperand(0), 3777 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3778 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3779 } else { 3780 // If we don't know what to do, be conservative and don't do anything. 3781 continue; 3782 } 3783 3784 // Lastly, extend the result. 3785 NewI->takeName(cast<Instruction>(I)); 3786 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3787 I->replaceAllUsesWith(Res); 3788 cast<Instruction>(I)->eraseFromParent(); 3789 Erased.insert(I); 3790 State.reset(Def, Res, Part); 3791 } 3792 } 3793 3794 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3795 for (const auto &KV : Cost->getMinimalBitwidths()) { 3796 // If the value wasn't vectorized, we must maintain the original scalar 3797 // type. The absence of the value from State indicates that it 3798 // wasn't vectorized. 
3799 VPValue *Def = State.Plan->getVPValue(KV.first); 3800 if (!State.hasAnyVectorValue(Def)) 3801 continue; 3802 for (unsigned Part = 0; Part < UF; ++Part) { 3803 Value *I = State.get(Def, Part); 3804 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3805 if (Inst && Inst->use_empty()) { 3806 Value *NewI = Inst->getOperand(0); 3807 Inst->eraseFromParent(); 3808 State.reset(Def, NewI, Part); 3809 } 3810 } 3811 } 3812 } 3813 3814 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3815 // Insert truncates and extends for any truncated instructions as hints to 3816 // InstCombine. 3817 if (VF.isVector()) 3818 truncateToMinimalBitwidths(State); 3819 3820 // Fix widened non-induction PHIs by setting up the PHI operands. 3821 if (OrigPHIsToFix.size()) { 3822 assert(EnableVPlanNativePath && 3823 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3824 fixNonInductionPHIs(State); 3825 } 3826 3827 // At this point every instruction in the original loop is widened to a 3828 // vector form. Now we need to fix the recurrences in the loop. These PHI 3829 // nodes are currently empty because we did not want to introduce cycles. 3830 // This is the second stage of vectorizing recurrences. 3831 fixCrossIterationPHIs(State); 3832 3833 // Forget the original basic block. 3834 PSE.getSE()->forgetLoop(OrigLoop); 3835 3836 // Fix-up external users of the induction variables. 3837 for (auto &Entry : Legal->getInductionVars()) 3838 fixupIVUsers(Entry.first, Entry.second, 3839 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3840 IVEndValues[Entry.first], LoopMiddleBlock); 3841 3842 fixLCSSAPHIs(State); 3843 for (Instruction *PI : PredicatedInstructions) 3844 sinkScalarOperands(&*PI); 3845 3846 // Remove redundant induction instructions. 3847 cse(LoopVectorBody); 3848 3849 // Set/update profile weights for the vector and remainder loops as the 3850 // original loop iterations are now distributed among them. Note that the 3851 // original loop, represented by LoopScalarBody, becomes the remainder loop 3852 // after vectorization. 3853 // 3854 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may 3855 // end up getting a slightly roughened result, but that should be OK since 3856 // the profile is not inherently precise anyway. Note also that a possible 3857 // bypass of vector code caused by legality checks is ignored, assigning all 3858 // the weight to the vector loop, optimistically. 3859 // 3860 // For scalable vectorization we can't know at compile time how many 3861 // iterations of the loop are handled in one vector iteration, so instead 3862 // assume a pessimistic vscale of '1'. 3862 setProfileInfoAfterUnrolling( 3863 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 3864 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 3865 } 3866 3867 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3868 // In order to support recurrences we need to be able to vectorize Phi nodes. 3869 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3870 // stage #2: We now need to fix the recurrences by adding incoming edges to 3871 // the currently empty PHI nodes. At this point every instruction in the 3872 // original loop is widened to a vector form so we can use them to construct 3873 // the incoming edges. 3874 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3875 // Handle first-order recurrences and reductions that need to be fixed.
3876 if (Legal->isFirstOrderRecurrence(&Phi)) 3877 fixFirstOrderRecurrence(&Phi, State); 3878 else if (Legal->isReductionVariable(&Phi)) 3879 fixReduction(&Phi, State); 3880 } 3881 } 3882 3883 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi, 3884 VPTransformState &State) { 3885 // This is the second phase of vectorizing first-order recurrences. An 3886 // overview of the transformation is described below. Suppose we have the 3887 // following loop. 3888 // 3889 // for (int i = 0; i < n; ++i) 3890 // b[i] = a[i] - a[i - 1]; 3891 // 3892 // There is a first-order recurrence on "a". For this loop, the shorthand 3893 // scalar IR looks like: 3894 // 3895 // scalar.ph: 3896 // s_init = a[-1] 3897 // br scalar.body 3898 // 3899 // scalar.body: 3900 // i = phi [0, scalar.ph], [i+1, scalar.body] 3901 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3902 // s2 = a[i] 3903 // b[i] = s2 - s1 3904 // br cond, scalar.body, ... 3905 // 3906 // In this example, s1 is a recurrence because its value depends on the 3907 // previous iteration. In the first phase of vectorization, we created a 3908 // temporary value for s1. We now complete the vectorization and produce the 3909 // shorthand vector IR shown below (for VF = 4, UF = 1). 3910 // 3911 // vector.ph: 3912 // v_init = vector(..., ..., ..., a[-1]) 3913 // br vector.body 3914 // 3915 // vector.body 3916 // i = phi [0, vector.ph], [i+4, vector.body] 3917 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3918 // v2 = a[i, i+1, i+2, i+3]; 3919 // v3 = vector(v1(3), v2(0, 1, 2)) 3920 // b[i, i+1, i+2, i+3] = v2 - v3 3921 // br cond, vector.body, middle.block 3922 // 3923 // middle.block: 3924 // x = v2(3) 3925 // br scalar.ph 3926 // 3927 // scalar.ph: 3928 // s_init = phi [x, middle.block], [a[-1], otherwise] 3929 // br scalar.body 3930 // 3931 // After execution completes the vector loop, we extract the next value of 3932 // the recurrence (x) to use as the initial value in the scalar loop. 3933 3934 // Get the original loop preheader and single loop latch. 3935 auto *Preheader = OrigLoop->getLoopPreheader(); 3936 auto *Latch = OrigLoop->getLoopLatch(); 3937 3938 // Get the initial and previous values of the scalar recurrence. 3939 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 3940 auto *Previous = Phi->getIncomingValueForBlock(Latch); 3941 3942 // Create a vector from the initial value. 3943 auto *VectorInit = ScalarInit; 3944 if (VF.isVector()) { 3945 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3946 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 3947 VectorInit = Builder.CreateInsertElement( 3948 PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3949 Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init"); 3950 } 3951 3952 VPValue *PhiDef = State.Plan->getVPValue(Phi); 3953 VPValue *PreviousDef = State.Plan->getVPValue(Previous); 3954 // We constructed a temporary phi node in the first phase of vectorization. 3955 // This phi node will eventually be deleted. 3956 Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0))); 3957 3958 // Create a phi node for the new recurrence. The current value will either be 3959 // the initial value inserted into a vector or a loop-varying vector value. 3960 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3961 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3962 3963 // Get the vectorized previous value of the last part UF - 1.
It appears last 3964 // among all unrolled iterations, due to the order of their construction. 3965 Value *PreviousLastPart = State.get(PreviousDef, UF - 1); 3966 3967 // Find and set the insertion point after the previous value if it is an 3968 // instruction. 3969 BasicBlock::iterator InsertPt; 3970 // Note that the previous value may have been constant-folded so it is not 3971 // guaranteed to be an instruction in the vector loop. 3972 // FIXME: Loop invariant values do not form recurrences. We should deal with 3973 // them earlier. 3974 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 3975 InsertPt = LoopVectorBody->getFirstInsertionPt(); 3976 else { 3977 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 3978 if (isa<PHINode>(PreviousLastPart)) 3979 // If the previous value is a phi node, we should insert after all the phi 3980 // nodes in the block containing the PHI to avoid breaking basic block 3981 // verification. Note that the basic block may be different to 3982 // LoopVectorBody, in case we predicate the loop. 3983 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 3984 else 3985 InsertPt = ++PreviousInst->getIterator(); 3986 } 3987 Builder.SetInsertPoint(&*InsertPt); 3988 3989 // We will construct a vector for the recurrence by combining the values for 3990 // the current and previous iterations. This is the required shuffle mask. 3991 assert(!VF.isScalable()); 3992 SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue()); 3993 ShuffleMask[0] = VF.getKnownMinValue() - 1; 3994 for (unsigned I = 1; I < VF.getKnownMinValue(); ++I) 3995 ShuffleMask[I] = I + VF.getKnownMinValue() - 1; 3996 3997 // The vector from which to take the initial value for the current iteration 3998 // (actual or unrolled). Initially, this is the vector phi node. 3999 Value *Incoming = VecPhi; 4000 4001 // Shuffle the current and previous vector and update the vector parts. 4002 for (unsigned Part = 0; Part < UF; ++Part) { 4003 Value *PreviousPart = State.get(PreviousDef, Part); 4004 Value *PhiPart = State.get(PhiDef, Part); 4005 auto *Shuffle = 4006 VF.isVector() 4007 ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask) 4008 : Incoming; 4009 PhiPart->replaceAllUsesWith(Shuffle); 4010 cast<Instruction>(PhiPart)->eraseFromParent(); 4011 State.reset(PhiDef, Shuffle, Part); 4012 Incoming = PreviousPart; 4013 } 4014 4015 // Fix the latch value of the new recurrence in the vector loop. 4016 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4017 4018 // Extract the last vector element in the middle block. This will be the 4019 // initial value for the recurrence when jumping to the scalar loop. 4020 auto *ExtractForScalar = Incoming; 4021 if (VF.isVector()) { 4022 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4023 ExtractForScalar = Builder.CreateExtractElement( 4024 ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1), 4025 "vector.recur.extract"); 4026 } 4027 // Extract the second last element in the middle block if the 4028 // Phi is used outside the loop. We need to extract the phi itself 4029 // and not the last element (the phi update in the current iteration). This 4030 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4031 // when the scalar loop is not run at all. 
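// For example, with VF = 4 the scalar resume value is lane 3 of the last
// part (the most recent value of the recurrence), whereas a phi user
// outside the loop needs lane 2: the value the recurrence phi itself held
// on the final vector iteration.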
4032 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4033 if (VF.isVector()) 4034 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4035 Incoming, Builder.getInt32(VF.getKnownMinValue() - 2), 4036 "vector.recur.extract.for.phi"); 4037 // When the loop is unrolled without vectorizing, initialize 4038 // ExtractForPhiUsedOutsideLoop with the part just prior to the unrolled 4039 // value of `Incoming`. This is analogous to the vectorized case above: 4040 // extracting the second-last element when VF > 1. 4041 else if (UF > 1) 4042 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4043 4044 // Fix the initial value of the original recurrence in the scalar loop. 4045 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4046 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4047 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4048 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4049 Start->addIncoming(Incoming, BB); 4050 } 4051 4052 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4053 Phi->setName("scalar.recur"); 4054 4055 // Finally, fix users of the recurrence outside the loop. The users will need 4056 // either the last value of the scalar recurrence or the last value of the 4057 // vector recurrence we extracted in the middle block. Since the loop is in 4058 // LCSSA form, we just need to find all the phi nodes for the original scalar 4059 // recurrence in the exit block, and then add an edge for the middle block. 4060 // Note that LCSSA does not imply single entry when the original scalar loop 4061 // had multiple exiting edges (as we always run the last iteration in the 4062 // scalar epilogue); in that case, the exiting path through middle will be 4063 // dynamically dead and the value picked for the phi doesn't matter. 4064 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4065 if (any_of(LCSSAPhi.incoming_values(), 4066 [Phi](Value *V) { return V == Phi; })) 4067 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4068 } 4069 4070 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) { 4071 // Get its reduction variable descriptor. 4072 assert(Legal->isReductionVariable(Phi) && 4073 "Unable to find the reduction variable"); 4074 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4075 4076 RecurKind RK = RdxDesc.getRecurrenceKind(); 4077 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4078 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4079 setDebugLocFromInst(Builder, ReductionStartValue); 4080 bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi); 4081 4082 VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst); 4083 // This is the vector-clone of the value that leaves the loop. 4084 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 4085 4086 // Wrap flags are in general invalid after vectorization, clear them. 4087 clearReductionWrapFlags(RdxDesc, State); 4088 4089 // Fix the vector-loop phi. 4090 4091 // Reductions do not have to start at zero. They can start with 4092 // any loop-invariant value.
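// (Sketch, as the exact seeding is done when the phi is widened: for
// "s = 42; for (...) s += a[i];" with VF = 4 the vector phi typically
// starts as <i32 42, i32 0, i32 0, i32 0>, the start value in one lane and
// the additive identity in the others, so the final horizontal reduction
// reproduces 42 plus the vectorized sum.)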
4093 BasicBlock *Latch = OrigLoop->getLoopLatch(); 4094 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 4095 4096 for (unsigned Part = 0; Part < UF; ++Part) { 4097 Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part); 4098 Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part); 4099 cast<PHINode>(VecRdxPhi) 4100 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4101 } 4102 4103 // Before each round, move the insertion point right between 4104 // the PHIs and the values we are going to write. 4105 // This allows us to write both PHINodes and the extractelement 4106 // instructions. 4107 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4108 4109 setDebugLocFromInst(Builder, LoopExitInst); 4110 4111 // If tail is folded by masking, the vector value to leave the loop should be 4112 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4113 // instead of the former. For an inloop reduction the reduction will already 4114 // be predicated, and does not need to be handled here. 4115 if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { 4116 for (unsigned Part = 0; Part < UF; ++Part) { 4117 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4118 Value *Sel = nullptr; 4119 for (User *U : VecLoopExitInst->users()) { 4120 if (isa<SelectInst>(U)) { 4121 assert(!Sel && "Reduction exit feeding two selects"); 4122 Sel = U; 4123 } else 4124 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4125 } 4126 assert(Sel && "Reduction exit feeds no select"); 4127 State.reset(LoopExitInstDef, Sel, Part); 4128 4129 // If the target can create a predicated operator for the reduction at no 4130 // extra cost in the loop (for example a predicated vadd), it can be 4131 // cheaper for the select to remain in the loop than be sunk out of it, 4132 // and so use the select value for the phi instead of the old 4133 // LoopExitValue. 4134 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4135 if (PreferPredicatedReductionSelect || 4136 TTI->preferPredicatedReductionSelect( 4137 RdxDesc.getOpcode(), Phi->getType(), 4138 TargetTransformInfo::ReductionFlags())) { 4139 auto *VecRdxPhi = 4140 cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part)); 4141 VecRdxPhi->setIncomingValueForBlock( 4142 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4143 } 4144 } 4145 } 4146 4147 // If the vector reduction can be performed in a smaller type, we truncate 4148 // then extend the loop exit value to enable InstCombine to evaluate the 4149 // entire expression in the smaller type. 4150 if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) { 4151 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4152 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4153 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4154 Builder.SetInsertPoint( 4155 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4156 VectorParts RdxParts(UF); 4157 for (unsigned Part = 0; Part < UF; ++Part) { 4158 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4159 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4160 Value *Extnd = RdxDesc.isSigned() ? 
Builder.CreateSExt(Trunc, VecTy) 4161 : Builder.CreateZExt(Trunc, VecTy); 4162 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4163 UI != RdxParts[Part]->user_end();) 4164 if (*UI != Trunc) { 4165 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4166 RdxParts[Part] = Extnd; 4167 } else { 4168 ++UI; 4169 } 4170 } 4171 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4172 for (unsigned Part = 0; Part < UF; ++Part) { 4173 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4174 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4175 } 4176 } 4177 4178 // Reduce all of the unrolled parts into a single vector. 4179 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4180 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4181 4182 // The middle block terminator has already been assigned a DebugLoc here (the 4183 // OrigLoop's single latch terminator). We want the whole middle block to 4184 // appear to execute on this line because: (a) it is all compiler generated, 4185 // (b) these instructions are always executed after evaluating the latch 4186 // conditional branch, and (c) other passes may add new predecessors which 4187 // terminate on this line. This is the easiest way to ensure we don't 4188 // accidentally cause an extra step back into the loop while debugging. 4189 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4190 { 4191 // Floating-point operations should have some FMF to enable the reduction. 4192 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4193 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4194 for (unsigned Part = 1; Part < UF; ++Part) { 4195 Value *RdxPart = State.get(LoopExitInstDef, Part); 4196 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4197 ReducedPartRdx = Builder.CreateBinOp( 4198 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4199 } else { 4200 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4201 } 4202 } 4203 } 4204 4205 // Create the reduction after the loop. Note that inloop reductions create the 4206 // target reduction in the loop using a Reduction recipe. 4207 if (VF.isVector() && !IsInLoopReductionPhi) { 4208 ReducedPartRdx = 4209 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4210 // If the reduction can be performed in a smaller type, we need to extend 4211 // the reduction to the wider type before we branch to the original loop. 4212 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4213 ReducedPartRdx = 4214 RdxDesc.isSigned() 4215 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4216 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4217 } 4218 4219 // Create a phi node that merges control-flow from the backedge-taken check 4220 // block and the middle block. 4221 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4222 LoopScalarPreHeader->getTerminator()); 4223 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4224 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4225 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4226 4227 // Now, we need to fix the users of the reduction variable 4228 // inside and outside of the scalar remainder loop. 4229 4230 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4231 // in the exit blocks. See comment on analogous loop in 4232 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
4233 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4234 if (any_of(LCSSAPhi.incoming_values(), 4235 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4236 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4237 4238 // Fix the scalar loop reduction variable with the incoming reduction sum 4239 // from the vector body and from the backedge value. 4240 int IncomingEdgeBlockIdx = 4241 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4242 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4243 // Pick the other block. 4244 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4245 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4246 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4247 } 4248 4249 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc, 4250 VPTransformState &State) { 4251 RecurKind RK = RdxDesc.getRecurrenceKind(); 4252 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4253 return; 4254 4255 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4256 assert(LoopExitInstr && "null loop exit instruction"); 4257 SmallVector<Instruction *, 8> Worklist; 4258 SmallPtrSet<Instruction *, 8> Visited; 4259 Worklist.push_back(LoopExitInstr); 4260 Visited.insert(LoopExitInstr); 4261 4262 while (!Worklist.empty()) { 4263 Instruction *Cur = Worklist.pop_back_val(); 4264 if (isa<OverflowingBinaryOperator>(Cur)) 4265 for (unsigned Part = 0; Part < UF; ++Part) { 4266 Value *V = State.get(State.Plan->getVPValue(Cur), Part); 4267 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4268 } 4269 4270 for (User *U : Cur->users()) { 4271 Instruction *UI = cast<Instruction>(U); 4272 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4273 Visited.insert(UI).second) 4274 Worklist.push_back(UI); 4275 } 4276 } 4277 } 4278 4279 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4280 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4281 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4282 // Some phis were already hand updated by the reduction and recurrence 4283 // code above, leave them alone. 4284 continue; 4285 4286 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4287 // Non-instruction incoming values will have only one value. 4288 unsigned LastLane = 0; 4289 if (isa<Instruction>(IncomingValue)) 4290 LastLane = Cost->isUniformAfterVectorization( 4291 cast<Instruction>(IncomingValue), VF) 4292 ? 0 4293 : VF.getKnownMinValue() - 1; 4294 assert((!VF.isScalable() || LastLane == 0) && 4295 "scalable vectors dont support non-uniform scalars yet"); 4296 // Can be a loop invariant incoming value or the last scalar value to be 4297 // extracted from the vectorized loop. 4298 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4299 Value *lastIncomingValue = 4300 OrigLoop->isLoopInvariant(IncomingValue) 4301 ? IncomingValue 4302 : State.get(State.Plan->getVPValue(IncomingValue), 4303 VPIteration(UF - 1, LastLane)); 4304 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4305 } 4306 } 4307 4308 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4309 // The basic block and loop containing the predicated instruction. 4310 auto *PredBB = PredInst->getParent(); 4311 auto *VectorLoop = LI->getLoopFor(PredBB); 4312 4313 // Initialize a worklist with the operands of the predicated instruction. 4314 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4315 4316 // Holds instructions that we need to analyze again. 
An instruction may be 4317 // reanalyzed if we don't yet know if we can sink it or not. 4318 SmallVector<Instruction *, 8> InstsToReanalyze; 4319 4320 // Returns true if a given use occurs in the predicated block. Phi nodes use 4321 // their operands in their corresponding predecessor blocks. 4322 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4323 auto *I = cast<Instruction>(U.getUser()); 4324 BasicBlock *BB = I->getParent(); 4325 if (auto *Phi = dyn_cast<PHINode>(I)) 4326 BB = Phi->getIncomingBlock( 4327 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4328 return BB == PredBB; 4329 }; 4330 4331 // Iteratively sink the scalarized operands of the predicated instruction 4332 // into the block we created for it. When an instruction is sunk, its 4333 // operands are then added to the worklist. The algorithm ends when one pass 4334 // through the worklist doesn't sink a single instruction. 4335 bool Changed; 4336 do { 4337 // Add the instructions that need to be reanalyzed to the worklist, and 4338 // reset the changed indicator. 4339 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4340 InstsToReanalyze.clear(); 4341 Changed = false; 4342 4343 while (!Worklist.empty()) { 4344 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4345 4346 // We can't sink an instruction if it is a phi node, is already in the 4347 // predicated block, is not in the loop, or may have side effects. 4348 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4349 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4350 continue; 4351 4352 // It's legal to sink the instruction if all its uses occur in the 4353 // predicated block. Otherwise, there's nothing to do yet, and we may 4354 // need to reanalyze the instruction. 4355 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4356 InstsToReanalyze.push_back(I); 4357 continue; 4358 } 4359 4360 // Move the instruction to the beginning of the predicated block, and add 4361 // its operands to the worklist. 4362 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4363 Worklist.insert(I->op_begin(), I->op_end()); 4364 4365 // The sinking may have enabled other instructions to be sunk, so we will 4366 // need to iterate. 4367 Changed = true; 4368 } 4369 } while (Changed); 4370 } 4371 4372 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4373 for (PHINode *OrigPhi : OrigPHIsToFix) { 4374 VPWidenPHIRecipe *VPPhi = 4375 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4376 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4377 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4378 VPValue *Inc = VPPhi->getIncomingValue(i); 4379 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4380 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4381 } 4382 } 4383 } 4384 4385 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4386 VPUser &Operands, unsigned UF, 4387 ElementCount VF, bool IsPtrLoopInvariant, 4388 SmallBitVector &IsIndexLoopInvariant, 4389 VPTransformState &State) { 4390 // Construct a vector GEP by widening the operands of the scalar GEP as 4391 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4392 // results in a vector of pointers when at least one operand of the GEP 4393 // is vector-typed. Thus, to keep the representation compact, we only use 4394 // vector-typed operands for loop-varying values.
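// For example, for a GEP such as 'getelementptr %A, %iv' where %A is loop-invariant and %iv is the widened induction, only the index operand is vector-typed; the single resulting GEP still produces a vector of pointers.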
4395 4396 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4397 // If we are vectorizing, but the GEP has only loop-invariant operands, 4398 // the GEP we build (by only using vector-typed operands for 4399 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4400 // produce a vector of pointers, we need to either arbitrarily pick an 4401 // operand to broadcast, or broadcast a clone of the original GEP. 4402 // Here, we broadcast a clone of the original. 4403 // 4404 // TODO: If at some point we decide to scalarize instructions having 4405 // loop-invariant operands, this special case will no longer be 4406 // required. We would add the scalarization decision to 4407 // collectLoopScalars() and teach getVectorValue() to broadcast 4408 // the lane-zero scalar value. 4409 auto *Clone = Builder.Insert(GEP->clone()); 4410 for (unsigned Part = 0; Part < UF; ++Part) { 4411 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4412 State.set(VPDef, EntryPart, Part); 4413 addMetadata(EntryPart, GEP); 4414 } 4415 } else { 4416 // If the GEP has at least one loop-varying operand, we are sure to 4417 // produce a vector of pointers. But if we are only unrolling, we want 4418 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4419 // produce with the code below will be scalar (if VF == 1) or vector 4420 // (otherwise). Note that for the unroll-only case, we still maintain 4421 // values in the vector mapping with initVector, as we do for other 4422 // instructions. 4423 for (unsigned Part = 0; Part < UF; ++Part) { 4424 // The pointer operand of the new GEP. If it's loop-invariant, we 4425 // won't broadcast it. 4426 auto *Ptr = IsPtrLoopInvariant 4427 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4428 : State.get(Operands.getOperand(0), Part); 4429 4430 // Collect all the indices for the new GEP. If any index is 4431 // loop-invariant, we won't broadcast it. 4432 SmallVector<Value *, 4> Indices; 4433 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4434 VPValue *Operand = Operands.getOperand(I); 4435 if (IsIndexLoopInvariant[I - 1]) 4436 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4437 else 4438 Indices.push_back(State.get(Operand, Part)); 4439 } 4440 4441 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4442 // but it should be a vector, otherwise. 4443 auto *NewGEP = 4444 GEP->isInBounds() 4445 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4446 Indices) 4447 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4448 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4449 "NewGEP is not a pointer vector"); 4450 State.set(VPDef, NewGEP, Part); 4451 addMetadata(NewGEP, GEP); 4452 } 4453 } 4454 } 4455 4456 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4457 RecurrenceDescriptor *RdxDesc, 4458 VPValue *StartVPV, VPValue *Def, 4459 VPTransformState &State) { 4460 PHINode *P = cast<PHINode>(PN); 4461 if (EnableVPlanNativePath) { 4462 // Currently we enter here in the VPlan-native path for non-induction 4463 // PHIs where all control flow is uniform. We simply widen these PHIs. 4464 // Create a vector phi with no operands - the vector phi operands will be 4465 // set at the end of vector code generation. 4466 Type *VecTy = (State.VF.isScalar()) 4467 ? 
PN->getType() 4468 : VectorType::get(PN->getType(), State.VF); 4469 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4470 State.set(Def, VecPhi, 0); 4471 OrigPHIsToFix.push_back(P); 4472 4473 return; 4474 } 4475 4476 assert(PN->getParent() == OrigLoop->getHeader() && 4477 "Non-header phis should have been handled elsewhere"); 4478 4479 Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr; 4480 // In order to support recurrences we need to be able to vectorize Phi nodes. 4481 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4482 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4483 // this value when we vectorize all of the instructions that use the PHI. 4484 if (RdxDesc || Legal->isFirstOrderRecurrence(P)) { 4485 Value *Iden = nullptr; 4486 bool ScalarPHI = 4487 (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN)); 4488 Type *VecTy = 4489 ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF); 4490 4491 if (RdxDesc) { 4492 assert(Legal->isReductionVariable(P) && StartV && 4493 "RdxDesc should only be set for reduction variables; in that case " 4494 "a StartV is also required"); 4495 RecurKind RK = RdxDesc->getRecurrenceKind(); 4496 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) { 4497 // MinMax reductions have the start value as their identity. 4498 if (ScalarPHI) { 4499 Iden = StartV; 4500 } else { 4501 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4502 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4503 StartV = Iden = 4504 Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident"); 4505 } 4506 } else { 4507 Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( 4508 RK, VecTy->getScalarType()); 4509 Iden = IdenC; 4510 4511 if (!ScalarPHI) { 4512 Iden = ConstantVector::getSplat(State.VF, IdenC); 4513 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4514 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4515 Constant *Zero = Builder.getInt32(0); 4516 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 4517 } 4518 } 4519 } 4520 4521 for (unsigned Part = 0; Part < State.UF; ++Part) { 4522 // This is phase one of vectorizing PHIs. 4523 Value *EntryPart = PHINode::Create( 4524 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4525 State.set(Def, EntryPart, Part); 4526 if (StartV) { 4527 // Make sure to add the reduction start value only to the 4528 // first unroll part. 4529 Value *StartVal = (Part == 0) ? StartV : Iden; 4530 cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); 4531 } 4532 } 4533 return; 4534 } 4535 4536 assert(!Legal->isReductionVariable(P) && 4537 "reductions should be handled above"); 4538 4539 setDebugLocFromInst(Builder, P); 4540 4541 // This PHINode must be an induction variable. 4542 // Make sure that we know about it. 4543 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4544 4545 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4546 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4547 4548 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4549 // which can be found from the original scalar operations.
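// A pointer induction is handled below in one of two ways: if it is scalar after vectorization, a scalar GEP index is emitted per lane and per unroll part; otherwise a single pointer phi is created and advanced by VF * UF * Step every vector iteration, and per-part vector GEPs are built off that phi.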
4550 switch (II.getKind()) { 4551 case InductionDescriptor::IK_NoInduction: 4552 llvm_unreachable("Unknown induction"); 4553 case InductionDescriptor::IK_IntInduction: 4554 case InductionDescriptor::IK_FpInduction: 4555 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4556 case InductionDescriptor::IK_PtrInduction: { 4557 // Handle the pointer induction variable case. 4558 assert(P->getType()->isPointerTy() && "Unexpected type."); 4559 4560 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4561 // This is the normalized GEP that starts counting at zero. 4562 Value *PtrInd = 4563 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4564 // Determine the number of scalars we need to generate for each unroll 4565 // iteration. If the instruction is uniform, we only need to generate the 4566 // first lane. Otherwise, we generate all VF values. 4567 unsigned Lanes = Cost->isUniformAfterVectorization(P, State.VF) 4568 ? 1 4569 : State.VF.getKnownMinValue(); 4570 for (unsigned Part = 0; Part < UF; ++Part) { 4571 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4572 Constant *Idx = ConstantInt::get( 4573 PtrInd->getType(), Lane + Part * State.VF.getKnownMinValue()); 4574 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4575 Value *SclrGep = 4576 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4577 SclrGep->setName("next.gep"); 4578 State.set(Def, SclrGep, VPIteration(Part, Lane)); 4579 } 4580 } 4581 return; 4582 } 4583 assert(isa<SCEVConstant>(II.getStep()) && 4584 "Induction step not a SCEV constant!"); 4585 Type *PhiType = II.getStep()->getType(); 4586 4587 // Build a pointer phi 4588 Value *ScalarStartValue = II.getStartValue(); 4589 Type *ScStValueType = ScalarStartValue->getType(); 4590 PHINode *NewPointerPhi = 4591 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4592 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4593 4594 // A pointer induction, performed by using a gep 4595 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4596 Instruction *InductionLoc = LoopLatch->getTerminator(); 4597 const SCEV *ScalarStep = II.getStep(); 4598 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4599 Value *ScalarStepValue = 4600 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4601 Value *InductionGEP = GetElementPtrInst::Create( 4602 ScStValueType->getPointerElementType(), NewPointerPhi, 4603 Builder.CreateMul( 4604 ScalarStepValue, 4605 ConstantInt::get(PhiType, State.VF.getKnownMinValue() * State.UF)), 4606 "ptr.ind", InductionLoc); 4607 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4608 4609 // Create UF many actual address geps that use the pointer 4610 // phi as base and a vectorized version of the step value 4611 // (<step*0, ..., step*N>) as offset. 4612 for (unsigned Part = 0; Part < State.UF; ++Part) { 4613 SmallVector<Constant *, 8> Indices; 4614 // Create a vector of consecutive numbers from zero to VF. 
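// E.g. with VF = 4, part 0 uses offsets <0, 1, 2, 3> and part 1 uses <4, 5, 6, 7>; each offset vector is then multiplied by the splatted step value.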
4615 for (unsigned i = 0; i < State.VF.getKnownMinValue(); ++i) 4616 Indices.push_back( 4617 ConstantInt::get(PhiType, i + Part * State.VF.getKnownMinValue())); 4618 Constant *StartOffset = ConstantVector::get(Indices); 4619 4620 Value *GEP = Builder.CreateGEP( 4621 ScStValueType->getPointerElementType(), NewPointerPhi, 4622 Builder.CreateMul(StartOffset, 4623 Builder.CreateVectorSplat( 4624 State.VF.getKnownMinValue(), ScalarStepValue), 4625 "vector.gep")); 4626 State.set(Def, GEP, Part); 4627 } 4628 } 4629 } 4630 } 4631 4632 /// A helper function for checking whether an integer division-related 4633 /// instruction may divide by zero (in which case it must be predicated if 4634 /// executed conditionally in the scalar code). 4635 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4636 /// Non-zero divisors that are non compile-time constants will not be 4637 /// converted into multiplication, so we will still end up scalarizing 4638 /// the division, but can do so w/o predication. 4639 static bool mayDivideByZero(Instruction &I) { 4640 assert((I.getOpcode() == Instruction::UDiv || 4641 I.getOpcode() == Instruction::SDiv || 4642 I.getOpcode() == Instruction::URem || 4643 I.getOpcode() == Instruction::SRem) && 4644 "Unexpected instruction"); 4645 Value *Divisor = I.getOperand(1); 4646 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4647 return !CInt || CInt->isZero(); 4648 } 4649 4650 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4651 VPUser &User, 4652 VPTransformState &State) { 4653 switch (I.getOpcode()) { 4654 case Instruction::Call: 4655 case Instruction::Br: 4656 case Instruction::PHI: 4657 case Instruction::GetElementPtr: 4658 case Instruction::Select: 4659 llvm_unreachable("This instruction is handled by a different recipe."); 4660 case Instruction::UDiv: 4661 case Instruction::SDiv: 4662 case Instruction::SRem: 4663 case Instruction::URem: 4664 case Instruction::Add: 4665 case Instruction::FAdd: 4666 case Instruction::Sub: 4667 case Instruction::FSub: 4668 case Instruction::FNeg: 4669 case Instruction::Mul: 4670 case Instruction::FMul: 4671 case Instruction::FDiv: 4672 case Instruction::FRem: 4673 case Instruction::Shl: 4674 case Instruction::LShr: 4675 case Instruction::AShr: 4676 case Instruction::And: 4677 case Instruction::Or: 4678 case Instruction::Xor: { 4679 // Just widen unops and binops. 4680 setDebugLocFromInst(Builder, &I); 4681 4682 for (unsigned Part = 0; Part < UF; ++Part) { 4683 SmallVector<Value *, 2> Ops; 4684 for (VPValue *VPOp : User.operands()) 4685 Ops.push_back(State.get(VPOp, Part)); 4686 4687 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4688 4689 if (auto *VecOp = dyn_cast<Instruction>(V)) 4690 VecOp->copyIRFlags(&I); 4691 4692 // Use this vector value for all users of the original instruction. 4693 State.set(Def, V, Part); 4694 addMetadata(V, &I); 4695 } 4696 4697 break; 4698 } 4699 case Instruction::ICmp: 4700 case Instruction::FCmp: { 4701 // Widen compares. Generate vector compares. 4702 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4703 auto *Cmp = cast<CmpInst>(&I); 4704 setDebugLocFromInst(Builder, Cmp); 4705 for (unsigned Part = 0; Part < UF; ++Part) { 4706 Value *A = State.get(User.getOperand(0), Part); 4707 Value *B = State.get(User.getOperand(1), Part); 4708 Value *C = nullptr; 4709 if (FCmp) { 4710 // Propagate fast math flags. 
4711 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4712 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4713 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4714 } else { 4715 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4716 } 4717 State.set(Def, C, Part); 4718 addMetadata(C, &I); 4719 } 4720 4721 break; 4722 } 4723 4724 case Instruction::ZExt: 4725 case Instruction::SExt: 4726 case Instruction::FPToUI: 4727 case Instruction::FPToSI: 4728 case Instruction::FPExt: 4729 case Instruction::PtrToInt: 4730 case Instruction::IntToPtr: 4731 case Instruction::SIToFP: 4732 case Instruction::UIToFP: 4733 case Instruction::Trunc: 4734 case Instruction::FPTrunc: 4735 case Instruction::BitCast: { 4736 auto *CI = cast<CastInst>(&I); 4737 setDebugLocFromInst(Builder, CI); 4738 4739 /// Vectorize casts. 4740 Type *DestTy = 4741 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4742 4743 for (unsigned Part = 0; Part < UF; ++Part) { 4744 Value *A = State.get(User.getOperand(0), Part); 4745 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4746 State.set(Def, Cast, Part); 4747 addMetadata(Cast, &I); 4748 } 4749 break; 4750 } 4751 default: 4752 // This instruction is not vectorized by simple widening. 4753 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4754 llvm_unreachable("Unhandled instruction!"); 4755 } // end of switch. 4756 } 4757 4758 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4759 VPUser &ArgOperands, 4760 VPTransformState &State) { 4761 assert(!isa<DbgInfoIntrinsic>(I) && 4762 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4763 setDebugLocFromInst(Builder, &I); 4764 4765 Module *M = I.getParent()->getParent()->getParent(); 4766 auto *CI = cast<CallInst>(&I); 4767 4768 SmallVector<Type *, 4> Tys; 4769 for (Value *ArgOperand : CI->arg_operands()) 4770 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4771 4772 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4773 4774 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4775 // version of the instruction. 4776 // Is it beneficial to perform intrinsic call compared to lib call? 4777 bool NeedToScalarize = false; 4778 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4779 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4780 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4781 assert((UseVectorIntrinsic || !NeedToScalarize) && 4782 "Instruction should be scalarized elsewhere."); 4783 assert(IntrinsicCost.isValid() && CallCost.isValid() && 4784 "Cannot have invalid costs while widening"); 4785 4786 for (unsigned Part = 0; Part < UF; ++Part) { 4787 SmallVector<Value *, 4> Args; 4788 for (auto &I : enumerate(ArgOperands.operands())) { 4789 // Some intrinsics have a scalar argument - don't replace it with a 4790 // vector. 4791 Value *Arg; 4792 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4793 Arg = State.get(I.value(), Part); 4794 else 4795 Arg = State.get(I.value(), VPIteration(0, 0)); 4796 Args.push_back(Arg); 4797 } 4798 4799 Function *VectorF; 4800 if (UseVectorIntrinsic) { 4801 // Use vector version of the intrinsic. 
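// For instance, a call to llvm.fma.f32 would, with VF = 4, resolve to llvm.fma.v4f32, since TysForDecl carries the widened return type used to mangle the intrinsic name (the specific intrinsic here is illustrative).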
4802 Type *TysForDecl[] = {CI->getType()}; 4803 if (VF.isVector()) 4804 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4805 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4806 assert(VectorF && "Can't retrieve vector intrinsic."); 4807 } else { 4808 // Use vector version of the function call. 4809 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4810 #ifndef NDEBUG 4811 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4812 "Can't create vector function."); 4813 #endif 4814 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4815 } 4816 SmallVector<OperandBundleDef, 1> OpBundles; 4817 CI->getOperandBundlesAsDefs(OpBundles); 4818 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4819 4820 if (isa<FPMathOperator>(V)) 4821 V->copyFastMathFlags(CI); 4822 4823 State.set(Def, V, Part); 4824 addMetadata(V, &I); 4825 } 4826 } 4827 4828 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 4829 VPUser &Operands, 4830 bool InvariantCond, 4831 VPTransformState &State) { 4832 setDebugLocFromInst(Builder, &I); 4833 4834 // The condition can be loop invariant but still defined inside the 4835 // loop. This means that we can't just use the original 'cond' value. 4836 // We have to take the 'vectorized' value and pick the first lane. 4837 // Instcombine will make this a no-op. 4838 auto *InvarCond = InvariantCond 4839 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4840 : nullptr; 4841 4842 for (unsigned Part = 0; Part < UF; ++Part) { 4843 Value *Cond = 4844 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 4845 Value *Op0 = State.get(Operands.getOperand(1), Part); 4846 Value *Op1 = State.get(Operands.getOperand(2), Part); 4847 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 4848 State.set(VPDef, Sel, Part); 4849 addMetadata(Sel, &I); 4850 } 4851 } 4852 4853 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4854 // We should not collect Scalars more than once per VF. Right now, this 4855 // function is called from collectUniformsAndScalars(), which already does 4856 // this check. Collecting Scalars for VF=1 does not make any sense. 4857 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4858 "This function should not be visited twice for the same VF"); 4859 4860 SmallSetVector<Instruction *, 8> Worklist; 4861 4862 // These sets are used to seed the analysis with pointers used by memory 4863 // accesses that will remain scalar. 4864 SmallSetVector<Instruction *, 8> ScalarPtrs; 4865 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4866 auto *Latch = TheLoop->getLoopLatch(); 4867 4868 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4869 // The pointer operands of loads and stores will be scalar as long as the 4870 // memory access is not a gather or scatter operation. The value operand of a 4871 // store will remain scalar if the store is scalarized. 
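// For example, the pointer operand of a consecutive (CM_Widen) load remains scalar because the wide load consumes a single pointer, whereas a CM_GatherScatter access needs a full vector of pointers.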
4872 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4873 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4874 assert(WideningDecision != CM_Unknown && 4875 "Widening decision should be ready at this moment"); 4876 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4877 if (Ptr == Store->getValueOperand()) 4878 return WideningDecision == CM_Scalarize; 4879 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4880 "Ptr is neither a value nor a pointer operand"); 4881 return WideningDecision != CM_GatherScatter; 4882 }; 4883 4884 // A helper that returns true if the given value is a bitcast or 4885 // getelementptr instruction contained in the loop. 4886 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4887 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4888 isa<GetElementPtrInst>(V)) && 4889 !TheLoop->isLoopInvariant(V); 4890 }; 4891 4892 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 4893 if (!isa<PHINode>(Ptr) || 4894 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 4895 return false; 4896 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 4897 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 4898 return false; 4899 return isScalarUse(MemAccess, Ptr); 4900 }; 4901 4902 // A helper that evaluates a memory access's use of a pointer. If the 4903 // pointer is actually the pointer induction of a loop, it is inserted 4904 // into Worklist. If the use will be a scalar use, and the 4905 // pointer is only used by memory accesses, we place the pointer in 4906 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 4907 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4908 if (isScalarPtrInduction(MemAccess, Ptr)) { 4909 Worklist.insert(cast<Instruction>(Ptr)); 4910 Instruction *Update = cast<Instruction>( 4911 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 4912 Worklist.insert(Update); 4913 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 4914 << "\n"); 4915 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update 4916 << "\n"); 4917 return; 4918 } 4919 // We only care about bitcast and getelementptr instructions contained in 4920 // the loop. 4921 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4922 return; 4923 4924 // If the pointer has already been identified as scalar (e.g., if it was 4925 // also identified as uniform), there's nothing to do. 4926 auto *I = cast<Instruction>(Ptr); 4927 if (Worklist.count(I)) 4928 return; 4929 4930 // If the use of the pointer will be a scalar use, and all users of the 4931 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4932 // place the pointer in PossibleNonScalarPtrs. 4933 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4934 return isa<LoadInst>(U) || isa<StoreInst>(U); 4935 })) 4936 ScalarPtrs.insert(I); 4937 else 4938 PossibleNonScalarPtrs.insert(I); 4939 }; 4940 4941 // We seed the scalars analysis with two classes of instructions: (1) 4942 // instructions marked uniform-after-vectorization and (2) bitcast, 4943 // getelementptr and (pointer) phi instructions used by memory accesses 4944 // requiring a scalar use. 4945 // 4946 // (1) Add to the worklist all instructions that have been identified as 4947 // uniform-after-vectorization.
4948 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4949 4950 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4951 // memory accesses requiring a scalar use. The pointer operands of loads and 4952 // stores will be scalar as long as the memory accesses is not a gather or 4953 // scatter operation. The value operand of a store will remain scalar if the 4954 // store is scalarized. 4955 for (auto *BB : TheLoop->blocks()) 4956 for (auto &I : *BB) { 4957 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4958 evaluatePtrUse(Load, Load->getPointerOperand()); 4959 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4960 evaluatePtrUse(Store, Store->getPointerOperand()); 4961 evaluatePtrUse(Store, Store->getValueOperand()); 4962 } 4963 } 4964 for (auto *I : ScalarPtrs) 4965 if (!PossibleNonScalarPtrs.count(I)) { 4966 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4967 Worklist.insert(I); 4968 } 4969 4970 // Insert the forced scalars. 4971 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4972 // induction variable when the PHI user is scalarized. 4973 auto ForcedScalar = ForcedScalars.find(VF); 4974 if (ForcedScalar != ForcedScalars.end()) 4975 for (auto *I : ForcedScalar->second) 4976 Worklist.insert(I); 4977 4978 // Expand the worklist by looking through any bitcasts and getelementptr 4979 // instructions we've already identified as scalar. This is similar to the 4980 // expansion step in collectLoopUniforms(); however, here we're only 4981 // expanding to include additional bitcasts and getelementptr instructions. 4982 unsigned Idx = 0; 4983 while (Idx != Worklist.size()) { 4984 Instruction *Dst = Worklist[Idx++]; 4985 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4986 continue; 4987 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4988 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4989 auto *J = cast<Instruction>(U); 4990 return !TheLoop->contains(J) || Worklist.count(J) || 4991 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4992 isScalarUse(J, Src)); 4993 })) { 4994 Worklist.insert(Src); 4995 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4996 } 4997 } 4998 4999 // An induction variable will remain scalar if all users of the induction 5000 // variable and induction variable update remain scalar. 5001 for (auto &Induction : Legal->getInductionVars()) { 5002 auto *Ind = Induction.first; 5003 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5004 5005 // If tail-folding is applied, the primary induction variable will be used 5006 // to feed a vector compare. 5007 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5008 continue; 5009 5010 // Determine if all users of the induction variable are scalar after 5011 // vectorization. 5012 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5013 auto *I = cast<Instruction>(U); 5014 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5015 }); 5016 if (!ScalarInd) 5017 continue; 5018 5019 // Determine if all users of the induction variable update instruction are 5020 // scalar after vectorization. 5021 auto ScalarIndUpdate = 5022 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5023 auto *I = cast<Instruction>(U); 5024 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5025 }); 5026 if (!ScalarIndUpdate) 5027 continue; 5028 5029 // The induction variable and its update instruction will remain scalar. 
5030 Worklist.insert(Ind); 5031 Worklist.insert(IndUpdate); 5032 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5033 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5034 << "\n"); 5035 } 5036 5037 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5038 } 5039 5040 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, 5041 ElementCount VF) { 5042 if (!blockNeedsPredication(I->getParent())) 5043 return false; 5044 switch(I->getOpcode()) { 5045 default: 5046 break; 5047 case Instruction::Load: 5048 case Instruction::Store: { 5049 if (!Legal->isMaskRequired(I)) 5050 return false; 5051 auto *Ptr = getLoadStorePointerOperand(I); 5052 auto *Ty = getMemInstValueType(I); 5053 // We have already decided how to vectorize this instruction, get that 5054 // result. 5055 if (VF.isVector()) { 5056 InstWidening WideningDecision = getWideningDecision(I, VF); 5057 assert(WideningDecision != CM_Unknown && 5058 "Widening decision should be ready at this moment"); 5059 return WideningDecision == CM_Scalarize; 5060 } 5061 const Align Alignment = getLoadStoreAlignment(I); 5062 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5063 isLegalMaskedGather(Ty, Alignment)) 5064 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5065 isLegalMaskedScatter(Ty, Alignment)); 5066 } 5067 case Instruction::UDiv: 5068 case Instruction::SDiv: 5069 case Instruction::SRem: 5070 case Instruction::URem: 5071 return mayDivideByZero(*I); 5072 } 5073 return false; 5074 } 5075 5076 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5077 Instruction *I, ElementCount VF) { 5078 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5079 assert(getWideningDecision(I, VF) == CM_Unknown && 5080 "Decision should not be set yet."); 5081 auto *Group = getInterleavedAccessGroup(I); 5082 assert(Group && "Must have a group."); 5083 5084 // If the instruction's allocated size doesn't equal it's type size, it 5085 // requires padding and will be scalarized. 5086 auto &DL = I->getModule()->getDataLayout(); 5087 auto *ScalarTy = getMemInstValueType(I); 5088 if (hasIrregularType(ScalarTy, DL, VF)) 5089 return false; 5090 5091 // Check if masking is required. 5092 // A Group may need masking for one of two reasons: it resides in a block that 5093 // needs predication, or it was decided to use masking to deal with gaps. 5094 bool PredicatedAccessRequiresMasking = 5095 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5096 bool AccessWithGapsRequiresMasking = 5097 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5098 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 5099 return true; 5100 5101 // If masked interleaving is required, we expect that the user/target had 5102 // enabled it, because otherwise it either wouldn't have been created or 5103 // it should have been invalidated by the CostModel. 5104 assert(useMaskedInterleavedAccesses(TTI) && 5105 "Masked interleave-groups for predicated accesses are not enabled."); 5106 5107 auto *Ty = getMemInstValueType(I); 5108 const Align Alignment = getLoadStoreAlignment(I); 5109 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5110 : TTI.isLegalMaskedStore(Ty, Alignment); 5111 } 5112 5113 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5114 Instruction *I, ElementCount VF) { 5115 // Get and ensure we have a valid memory instruction. 
5116 LoadInst *LI = dyn_cast<LoadInst>(I); 5117 StoreInst *SI = dyn_cast<StoreInst>(I); 5118 assert((LI || SI) && "Invalid memory instruction"); 5119 5120 auto *Ptr = getLoadStorePointerOperand(I); 5121 5122 // In order to be widened, the pointer should be consecutive, first of all. 5123 if (!Legal->isConsecutivePtr(Ptr)) 5124 return false; 5125 5126 // If the instruction is a store located in a predicated block, it will be 5127 // scalarized. 5128 if (isScalarWithPredication(I)) 5129 return false; 5130 5131 // If the instruction's allocated size doesn't equal its type size, it 5132 // requires padding and will be scalarized. 5133 auto &DL = I->getModule()->getDataLayout(); 5134 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5135 if (hasIrregularType(ScalarTy, DL, VF)) 5136 return false; 5137 5138 return true; 5139 } 5140 5141 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5142 // We should not collect Uniforms more than once per VF. Right now, 5143 // this function is called from collectUniformsAndScalars(), which 5144 // already does this check. Collecting Uniforms for VF=1 does not make any 5145 // sense. 5146 5147 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5148 "This function should not be visited twice for the same VF"); 5149 5150 // Visit the list of Uniforms. If we don't find any uniform value, we won't 5151 // analyze it again; Uniforms.count(VF) will still return 1. 5152 Uniforms[VF].clear(); 5153 5154 // We now know that the loop is vectorizable! 5155 // Collect instructions inside the loop that will remain uniform after 5156 // vectorization. 5157 5158 // Global values, params and instructions outside of the current loop are out of 5159 // scope. 5160 auto isOutOfScope = [&](Value *V) -> bool { 5161 Instruction *I = dyn_cast<Instruction>(V); 5162 return (!I || !TheLoop->contains(I)); 5163 }; 5164 5165 SetVector<Instruction *> Worklist; 5166 BasicBlock *Latch = TheLoop->getLoopLatch(); 5167 5168 // Instructions that are scalar with predication must not be considered 5169 // uniform after vectorization, because that would create an erroneous 5170 // replicating region where only a single instance out of VF should be formed. 5171 // TODO: optimize such seldom cases if found important, see PR40816. 5172 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5173 if (isOutOfScope(I)) { 5174 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5175 << *I << "\n"); 5176 return; 5177 } 5178 if (isScalarWithPredication(I, VF)) { 5179 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5180 << *I << "\n"); 5181 return; 5182 } 5183 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5184 Worklist.insert(I); 5185 }; 5186 5187 // Start with the conditional branch. If the branch condition is an 5188 // instruction contained in the loop that is only used by the branch, it is 5189 // uniform. 5190 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5191 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5192 addToWorklistIfAllowed(Cmp); 5193 5194 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5195 InstWidening WideningDecision = getWideningDecision(I, VF); 5196 assert(WideningDecision != CM_Unknown && 5197 "Widening decision should be ready at this moment"); 5198 5199 // A uniform memory op is itself uniform. We exclude uniform stores 5200 // here as they demand the last lane, not the first one.
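// (A uniform memory op here is one whose address does not vary across iterations, e.g. a load of the same location every iteration, so a single scalar access can serve all lanes.)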
5201 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5202 assert(WideningDecision == CM_Scalarize); 5203 return true; 5204 } 5205 5206 return (WideningDecision == CM_Widen || 5207 WideningDecision == CM_Widen_Reverse || 5208 WideningDecision == CM_Interleave); 5209 }; 5210 5211 5212 // Returns true if Ptr is the pointer operand of a memory access instruction 5213 // I, and I is known to not require scalarization. 5214 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5215 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5216 }; 5217 5218 // Holds a list of values which are known to have at least one uniform use. 5219 // Note that there may be other uses which aren't uniform. A "uniform use" 5220 // here is something which only demands lane 0 of the unrolled iterations; 5221 // it does not imply that all lanes produce the same value (e.g. this is not 5222 // the usual meaning of uniform) 5223 SmallPtrSet<Value *, 8> HasUniformUse; 5224 5225 // Scan the loop for instructions which are either a) known to have only 5226 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5227 for (auto *BB : TheLoop->blocks()) 5228 for (auto &I : *BB) { 5229 // If there's no pointer operand, there's nothing to do. 5230 auto *Ptr = getLoadStorePointerOperand(&I); 5231 if (!Ptr) 5232 continue; 5233 5234 // A uniform memory op is itself uniform. We exclude uniform stores 5235 // here as they demand the last lane, not the first one. 5236 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5237 addToWorklistIfAllowed(&I); 5238 5239 if (isUniformDecision(&I, VF)) { 5240 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5241 HasUniformUse.insert(Ptr); 5242 } 5243 } 5244 5245 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5246 // demanding) users. Since loops are assumed to be in LCSSA form, this 5247 // disallows uses outside the loop as well. 5248 for (auto *V : HasUniformUse) { 5249 if (isOutOfScope(V)) 5250 continue; 5251 auto *I = cast<Instruction>(V); 5252 auto UsersAreMemAccesses = 5253 llvm::all_of(I->users(), [&](User *U) -> bool { 5254 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5255 }); 5256 if (UsersAreMemAccesses) 5257 addToWorklistIfAllowed(I); 5258 } 5259 5260 // Expand Worklist in topological order: whenever a new instruction 5261 // is added , its users should be already inside Worklist. It ensures 5262 // a uniform instruction will only be used by uniform instructions. 5263 unsigned idx = 0; 5264 while (idx != Worklist.size()) { 5265 Instruction *I = Worklist[idx++]; 5266 5267 for (auto OV : I->operand_values()) { 5268 // isOutOfScope operands cannot be uniform instructions. 5269 if (isOutOfScope(OV)) 5270 continue; 5271 // First order recurrence Phi's should typically be considered 5272 // non-uniform. 5273 auto *OP = dyn_cast<PHINode>(OV); 5274 if (OP && Legal->isFirstOrderRecurrence(OP)) 5275 continue; 5276 // If all the users of the operand are uniform, then add the 5277 // operand into the uniform worklist. 5278 auto *OI = cast<Instruction>(OV); 5279 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5280 auto *J = cast<Instruction>(U); 5281 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5282 })) 5283 addToWorklistIfAllowed(OI); 5284 } 5285 } 5286 5287 // For an instruction to be added into Worklist above, all its users inside 5288 // the loop should also be in Worklist. 
However, this condition cannot be 5289 // true for phi nodes that form a cyclic dependence. We must process phi 5290 // nodes separately. An induction variable will remain uniform if all users 5291 // of the induction variable and induction variable update remain uniform. 5292 // The code below handles both pointer and non-pointer induction variables. 5293 for (auto &Induction : Legal->getInductionVars()) { 5294 auto *Ind = Induction.first; 5295 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5296 5297 // Determine if all users of the induction variable are uniform after 5298 // vectorization. 5299 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5300 auto *I = cast<Instruction>(U); 5301 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5302 isVectorizedMemAccessUse(I, Ind); 5303 }); 5304 if (!UniformInd) 5305 continue; 5306 5307 // Determine if all users of the induction variable update instruction are 5308 // uniform after vectorization. 5309 auto UniformIndUpdate = 5310 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5311 auto *I = cast<Instruction>(U); 5312 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5313 isVectorizedMemAccessUse(I, IndUpdate); 5314 }); 5315 if (!UniformIndUpdate) 5316 continue; 5317 5318 // The induction variable and its update instruction will remain uniform. 5319 addToWorklistIfAllowed(Ind); 5320 addToWorklistIfAllowed(IndUpdate); 5321 } 5322 5323 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5324 } 5325 5326 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5327 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5328 5329 if (Legal->getRuntimePointerChecking()->Need) { 5330 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5331 "runtime pointer checks needed. Enable vectorization of this " 5332 "loop with '#pragma clang loop vectorize(enable)' when " 5333 "compiling with -Os/-Oz", 5334 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5335 return true; 5336 } 5337 5338 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5339 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5340 "runtime SCEV checks needed. Enable vectorization of this " 5341 "loop with '#pragma clang loop vectorize(enable)' when " 5342 "compiling with -Os/-Oz", 5343 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5344 return true; 5345 } 5346 5347 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5348 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5349 reportVectorizationFailure("Runtime stride check for small trip count", 5350 "runtime stride == 1 checks needed. Enable vectorization of " 5351 "this loop without such check by compiling with -Os/-Oz", 5352 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5353 return true; 5354 } 5355 5356 return false; 5357 } 5358 5359 Optional<ElementCount> 5360 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5361 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5362 // TODO: It may by useful to do since it's still likely to be dynamically 5363 // uniform if the target can skip. 5364 reportVectorizationFailure( 5365 "Not inserting runtime ptr check for divergent target", 5366 "runtime pointer checks needed. 
Not enabled for divergent target", 5367 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5368 return None; 5369 } 5370 5371 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5372 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5373 if (TC == 1) { 5374 reportVectorizationFailure("Single iteration (non) loop", 5375 "loop trip count is one, irrelevant for vectorization", 5376 "SingleIterationLoop", ORE, TheLoop); 5377 return None; 5378 } 5379 5380 switch (ScalarEpilogueStatus) { 5381 case CM_ScalarEpilogueAllowed: 5382 return computeFeasibleMaxVF(TC, UserVF); 5383 case CM_ScalarEpilogueNotAllowedUsePredicate: 5384 LLVM_FALLTHROUGH; 5385 case CM_ScalarEpilogueNotNeededUsePredicate: 5386 LLVM_DEBUG( 5387 dbgs() << "LV: vector predicate hint/switch found.\n" 5388 << "LV: Not allowing scalar epilogue, creating predicated " 5389 << "vector loop.\n"); 5390 break; 5391 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5392 // fallthrough as a special case of OptForSize 5393 case CM_ScalarEpilogueNotAllowedOptSize: 5394 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5395 LLVM_DEBUG( 5396 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5397 else 5398 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5399 << "count.\n"); 5400 5401 // Bail if runtime checks are required, which are not good when optimising 5402 // for size. 5403 if (runtimeChecksRequired()) 5404 return None; 5405 5406 break; 5407 } 5408 5409 // The only loops we can vectorize without a scalar epilogue are loops with 5410 // a bottom-test and a single exiting block. We'd have to handle the fact 5411 // that not every instruction executes on the last iteration. This will 5412 // require a lane mask which varies through the vector loop body. (TODO) 5413 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5414 // If there was a tail-folding hint/switch, but we can't fold the tail by 5415 // masking, fall back to a vectorization with a scalar epilogue. 5416 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5417 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5418 "scalar epilogue instead.\n"); 5419 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5420 return computeFeasibleMaxVF(TC, UserVF); 5421 } 5422 return None; 5423 } 5424 5425 // Now try the tail folding 5426 5427 // Invalidate interleave groups that require an epilogue if we can't mask 5428 // the interleave-group. 5429 if (!useMaskedInterleavedAccesses(TTI)) { 5430 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5431 "No decisions should have been taken at this point"); 5432 // Note: There is no need to invalidate any cost modeling decisions here, as 5433 // none were taken so far. 5434 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5435 } 5436 5437 ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF); 5438 assert(!MaxVF.isScalable() && 5439 "Scalable vectors do not yet support tail folding"); 5440 assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) && 5441 "MaxVF must be a power of 2"); 5442 unsigned MaxVFtimesIC = 5443 UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue(); 5444 // Avoid tail folding if the trip count is known to be a multiple of any VF we 5445 // choose.
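// For example, with a known trip count of 64, MaxVF = 8 and UserIC = 2 (so MaxVFtimesIC = 16), 64 % 16 == 0: no tail remains for any chosen VF and tail folding is unnecessary.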
5446 ScalarEvolution *SE = PSE.getSE(); 5447 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5448 const SCEV *ExitCount = SE->getAddExpr( 5449 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5450 const SCEV *Rem = SE->getURemExpr( 5451 SE->applyLoopGuards(ExitCount, TheLoop), 5452 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5453 if (Rem->isZero()) { 5454 // Accept MaxVF if we do not have a tail. 5455 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5456 return MaxVF; 5457 } 5458 5459 // If we don't know the precise trip count, or if the trip count that we 5460 // found modulo the vectorization factor is not zero, try to fold the tail 5461 // by masking. 5462 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5463 if (Legal->prepareToFoldTailByMasking()) { 5464 FoldTailByMasking = true; 5465 return MaxVF; 5466 } 5467 5468 // If there was a tail-folding hint/switch, but we can't fold the tail by 5469 // masking, fallback to a vectorization with a scalar epilogue. 5470 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5471 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5472 "scalar epilogue instead.\n"); 5473 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5474 return MaxVF; 5475 } 5476 5477 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5478 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5479 return None; 5480 } 5481 5482 if (TC == 0) { 5483 reportVectorizationFailure( 5484 "Unable to calculate the loop count due to complex control flow", 5485 "unable to calculate the loop count due to complex control flow", 5486 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5487 return None; 5488 } 5489 5490 reportVectorizationFailure( 5491 "Cannot optimize for size and vectorize at the same time.", 5492 "cannot optimize for size and vectorize at the same time. " 5493 "Enable vectorization of this loop with '#pragma clang loop " 5494 "vectorize(enable)' when compiling with -Os/-Oz", 5495 "NoTailLoopWithOptForSize", ORE, TheLoop); 5496 return None; 5497 } 5498 5499 ElementCount 5500 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5501 ElementCount UserVF) { 5502 bool IgnoreScalableUserVF = UserVF.isScalable() && 5503 !TTI.supportsScalableVectors() && 5504 !ForceTargetSupportsScalableVectors; 5505 if (IgnoreScalableUserVF) { 5506 LLVM_DEBUG( 5507 dbgs() << "LV: Ignoring VF=" << UserVF 5508 << " because target does not support scalable vectors.\n"); 5509 ORE->emit([&]() { 5510 return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF", 5511 TheLoop->getStartLoc(), 5512 TheLoop->getHeader()) 5513 << "Ignoring VF=" << ore::NV("UserVF", UserVF) 5514 << " because target does not support scalable vectors."; 5515 }); 5516 } 5517 5518 // Beyond this point two scenarios are handled. If UserVF isn't specified 5519 // then a suitable VF is chosen. If UserVF is specified and there are 5520 // dependencies, check if it's legal. However, if a UserVF is specified and 5521 // there are no dependencies, then there's nothing to do. 5522 if (UserVF.isNonZero() && !IgnoreScalableUserVF) { 5523 if (!canVectorizeReductions(UserVF)) { 5524 reportVectorizationFailure( 5525 "LV: Scalable vectorization not supported for the reduction " 5526 "operations found in this loop. 
Using fixed-width " 5527 "vectorization instead.", 5528 "Scalable vectorization not supported for the reduction operations " 5529 "found in this loop. Using fixed-width vectorization instead.", 5530 "ScalableVFUnfeasible", ORE, TheLoop); 5531 return computeFeasibleMaxVF( 5532 ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue())); 5533 } 5534 5535 if (Legal->isSafeForAnyVectorWidth()) 5536 return UserVF; 5537 } 5538 5539 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5540 unsigned SmallestType, WidestType; 5541 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5542 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5543 5544 // Get the maximum safe dependence distance in bits computed by LAA. 5545 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5546 // the memory accesses that is most restrictive (involved in the smallest 5547 // dependence distance). 5548 unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits(); 5549 5550 // If the user vectorization factor is legally unsafe, clamp it to a safe 5551 // value. Otherwise, return as is. 5552 if (UserVF.isNonZero() && !IgnoreScalableUserVF) { 5553 unsigned MaxSafeElements = 5554 PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType); 5555 ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements); 5556 5557 if (UserVF.isScalable()) { 5558 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5559 5560 // Scale VF by vscale before checking if it's safe. 5561 MaxSafeVF = ElementCount::getScalable( 5562 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5563 5564 if (MaxSafeVF.isZero()) { 5565 // The dependence distance is too small to use scalable vectors, 5566 // fallback on fixed. 5567 LLVM_DEBUG( 5568 dbgs() 5569 << "LV: Max legal vector width too small, scalable vectorization " 5570 "unfeasible. Using fixed-width vectorization instead.\n"); 5571 ORE->emit([&]() { 5572 return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible", 5573 TheLoop->getStartLoc(), 5574 TheLoop->getHeader()) 5575 << "Max legal vector width too small, scalable vectorization " 5576 << "unfeasible. Using fixed-width vectorization instead."; 5577 }); 5578 return computeFeasibleMaxVF( 5579 ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue())); 5580 } 5581 } 5582 5583 LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n"); 5584 5585 if (ElementCount::isKnownLE(UserVF, MaxSafeVF)) 5586 return UserVF; 5587 5588 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5589 << " is unsafe, clamping to max safe VF=" << MaxSafeVF 5590 << ".\n"); 5591 ORE->emit([&]() { 5592 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5593 TheLoop->getStartLoc(), 5594 TheLoop->getHeader()) 5595 << "User-specified vectorization factor " 5596 << ore::NV("UserVectorizationFactor", UserVF) 5597 << " is unsafe, clamping to maximum safe vectorization factor " 5598 << ore::NV("VectorizationFactor", MaxSafeVF); 5599 }); 5600 return MaxSafeVF; 5601 } 5602 5603 WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits); 5604 5605 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5606 // Note that both WidestRegister and WidestType may not be a powers of 2. 
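  // Illustrative sketch of the clamp computed below (hypothetical numbers,
  // not part of the pass): the candidate VF is simply "how many of the widest
  // safe elements fit in the widest safe register", rounded down to a power
  // of two.
  //
  //   // E.g. a 256-bit register and a 24-bit widest type:
  //   //   256 / 24 = 10  ->  PowerOf2Floor(10) = 8 lanes.
  //   unsigned Lanes = PowerOf2Floor(WidestRegisterBits / WidestTypeBits);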
5607 auto MaxVectorSize = 5608 ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType)); 5609 5610 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5611 << " / " << WidestType << " bits.\n"); 5612 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5613 << WidestRegister << " bits.\n"); 5614 5615 assert(MaxVectorSize.getFixedValue() <= WidestRegister && 5616 "Did not expect to pack so many elements" 5617 " into one vector!"); 5618 if (MaxVectorSize.getFixedValue() == 0) { 5619 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5620 return ElementCount::getFixed(1); 5621 } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() && 5622 isPowerOf2_32(ConstTripCount)) { 5623 // We need to clamp the VF to be the ConstTripCount. There is no point in 5624 // choosing a higher viable VF as done in the loop below. 5625 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5626 << ConstTripCount << "\n"); 5627 return ElementCount::getFixed(ConstTripCount); 5628 } 5629 5630 ElementCount MaxVF = MaxVectorSize; 5631 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) || 5632 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5633 // Collect all viable vectorization factors larger than the default MaxVF 5634 // (i.e. MaxVectorSize). 5635 SmallVector<ElementCount, 8> VFs; 5636 auto MaxVectorSizeMaxBW = 5637 ElementCount::getFixed(WidestRegister / SmallestType); 5638 for (ElementCount VS = MaxVectorSize * 2; 5639 ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2) 5640 VFs.push_back(VS); 5641 5642 // For each VF calculate its register usage. 5643 auto RUs = calculateRegisterUsage(VFs); 5644 5645 // Select the largest VF which doesn't require more registers than existing 5646 // ones. 5647 for (int i = RUs.size() - 1; i >= 0; --i) { 5648 bool Selected = true; 5649 for (auto &pair : RUs[i].MaxLocalUsers) { 5650 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5651 if (pair.second > TargetNumRegisters) 5652 Selected = false; 5653 } 5654 if (Selected) { 5655 MaxVF = VFs[i]; 5656 break; 5657 } 5658 } 5659 if (ElementCount MinVF = 5660 TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) { 5661 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5662 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5663 << ") with target's minimum: " << MinVF << '\n'); 5664 MaxVF = MinVF; 5665 } 5666 } 5667 } 5668 return MaxVF; 5669 } 5670 5671 VectorizationFactor 5672 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) { 5673 // FIXME: This can be fixed for scalable vectors later, because at this stage 5674 // the LoopVectorizer will only consider vectorizing a loop with scalable 5675 // vectors when the loop has a hint to enable vectorization for a given VF. 5676 assert(!MaxVF.isScalable() && "scalable vectors not yet supported"); 5677 5678 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5679 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5680 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5681 5682 auto Width = ElementCount::getFixed(1); 5683 const float ScalarCost = *ExpectedCost.getValue(); 5684 float Cost = ScalarCost; 5685 5686 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5687 if (ForceVectorization && MaxVF.isVector()) { 5688 // Ignore scalar width, because the user explicitly wants vectorization. 
5689 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5690 // evaluation. 5691 Cost = std::numeric_limits<float>::max(); 5692 } 5693 5694 for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF); 5695 i *= 2) { 5696 // Notice that the vector loop needs to be executed less times, so 5697 // we need to divide the cost of the vector loops by the width of 5698 // the vector elements. 5699 VectorizationCostTy C = expectedCost(i); 5700 assert(C.first.isValid() && "Unexpected invalid cost for vector loop"); 5701 float VectorCost = *C.first.getValue() / (float)i.getFixedValue(); 5702 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5703 << " costs: " << (int)VectorCost << ".\n"); 5704 if (!C.second && !ForceVectorization) { 5705 LLVM_DEBUG( 5706 dbgs() << "LV: Not considering vector loop of width " << i 5707 << " because it will not generate any vector instructions.\n"); 5708 continue; 5709 } 5710 5711 // If profitable add it to ProfitableVF list. 5712 if (VectorCost < ScalarCost) { 5713 ProfitableVFs.push_back(VectorizationFactor( 5714 {i, (unsigned)VectorCost})); 5715 } 5716 5717 if (VectorCost < Cost) { 5718 Cost = VectorCost; 5719 Width = i; 5720 } 5721 } 5722 5723 if (!EnableCondStoresVectorization && NumPredStores) { 5724 reportVectorizationFailure("There are conditional stores.", 5725 "store that is conditionally executed prevents vectorization", 5726 "ConditionalStore", ORE, TheLoop); 5727 Width = ElementCount::getFixed(1); 5728 Cost = ScalarCost; 5729 } 5730 5731 LLVM_DEBUG(if (ForceVectorization && !Width.isScalar() && Cost >= ScalarCost) dbgs() 5732 << "LV: Vectorization seems to be not beneficial, " 5733 << "but was forced by a user.\n"); 5734 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5735 VectorizationFactor Factor = {Width, 5736 (unsigned)(Width.getKnownMinValue() * Cost)}; 5737 return Factor; 5738 } 5739 5740 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5741 const Loop &L, ElementCount VF) const { 5742 // Cross iteration phis such as reductions need special handling and are 5743 // currently unsupported. 5744 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 5745 return Legal->isFirstOrderRecurrence(&Phi) || 5746 Legal->isReductionVariable(&Phi); 5747 })) 5748 return false; 5749 5750 // Phis with uses outside of the loop require special handling and are 5751 // currently unsupported. 5752 for (auto &Entry : Legal->getInductionVars()) { 5753 // Look for uses of the value of the induction at the last iteration. 5754 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5755 for (User *U : PostInc->users()) 5756 if (!L.contains(cast<Instruction>(U))) 5757 return false; 5758 // Look for uses of penultimate value of the induction. 5759 for (User *U : Entry.first->users()) 5760 if (!L.contains(cast<Instruction>(U))) 5761 return false; 5762 } 5763 5764 // Induction variables that are widened require special handling that is 5765 // currently not supported. 
5766 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5767 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5768 this->isProfitableToScalarize(Entry.first, VF)); 5769 })) 5770 return false; 5771 5772 return true; 5773 } 5774 5775 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5776 const ElementCount VF) const { 5777 // FIXME: We need a much better cost-model to take different parameters such 5778 // as register pressure, code size increase and cost of extra branches into 5779 // account. For now we apply a very crude heuristic and only consider loops 5780 // with vectorization factors larger than a certain value. 5781 // We also consider epilogue vectorization unprofitable for targets that don't 5782 // consider interleaving beneficial (eg. MVE). 5783 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5784 return false; 5785 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 5786 return true; 5787 return false; 5788 } 5789 5790 VectorizationFactor 5791 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5792 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5793 VectorizationFactor Result = VectorizationFactor::Disabled(); 5794 if (!EnableEpilogueVectorization) { 5795 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5796 return Result; 5797 } 5798 5799 if (!isScalarEpilogueAllowed()) { 5800 LLVM_DEBUG( 5801 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5802 "allowed.\n";); 5803 return Result; 5804 } 5805 5806 // FIXME: This can be fixed for scalable vectors later, because at this stage 5807 // the LoopVectorizer will only consider vectorizing a loop with scalable 5808 // vectors when the loop has a hint to enable vectorization for a given VF. 5809 if (MainLoopVF.isScalable()) { 5810 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " 5811 "yet supported.\n"); 5812 return Result; 5813 } 5814 5815 // Not really a cost consideration, but check for unsupported cases here to 5816 // simplify the logic. 
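  // Summary sketch of the selection that follows (restating the control flow
  // below, not changing it): bail out for non-candidate loops, honor a forced
  // epilogue VF when a VPlan exists for it, skip when optimizing for size or
  // when the main VF is not considered worth augmenting, and otherwise pick
  // the lowest-cost already-profitable VF that is smaller than the main loop
  // VF and has a VPlan.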
5817 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5818 LLVM_DEBUG( 5819 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5820 "not a supported candidate.\n";); 5821 return Result; 5822 } 5823 5824 if (EpilogueVectorizationForceVF > 1) { 5825 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5826 if (LVP.hasPlanWithVFs( 5827 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 5828 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 5829 else { 5830 LLVM_DEBUG( 5831 dbgs() 5832 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5833 return Result; 5834 } 5835 } 5836 5837 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5838 TheLoop->getHeader()->getParent()->hasMinSize()) { 5839 LLVM_DEBUG( 5840 dbgs() 5841 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5842 return Result; 5843 } 5844 5845 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 5846 return Result; 5847 5848 for (auto &NextVF : ProfitableVFs) 5849 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 5850 (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) && 5851 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 5852 Result = NextVF; 5853 5854 if (Result != VectorizationFactor::Disabled()) 5855 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5856 << Result.Width.getFixedValue() << "\n";); 5857 return Result; 5858 } 5859 5860 std::pair<unsigned, unsigned> 5861 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5862 unsigned MinWidth = -1U; 5863 unsigned MaxWidth = 8; 5864 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5865 5866 // For each block. 5867 for (BasicBlock *BB : TheLoop->blocks()) { 5868 // For each instruction in the loop. 5869 for (Instruction &I : BB->instructionsWithoutDebug()) { 5870 Type *T = I.getType(); 5871 5872 // Skip ignored values. 5873 if (ValuesToIgnore.count(&I)) 5874 continue; 5875 5876 // Only examine Loads, Stores and PHINodes. 5877 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5878 continue; 5879 5880 // Examine PHI nodes that are reduction variables. Update the type to 5881 // account for the recurrence type. 5882 if (auto *PN = dyn_cast<PHINode>(&I)) { 5883 if (!Legal->isReductionVariable(PN)) 5884 continue; 5885 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN]; 5886 if (PreferInLoopReductions || 5887 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5888 RdxDesc.getRecurrenceType(), 5889 TargetTransformInfo::ReductionFlags())) 5890 continue; 5891 T = RdxDesc.getRecurrenceType(); 5892 } 5893 5894 // Examine the stored values. 5895 if (auto *ST = dyn_cast<StoreInst>(&I)) 5896 T = ST->getValueOperand()->getType(); 5897 5898 // Ignore loaded pointer types and stored pointer types that are not 5899 // vectorizable. 5900 // 5901 // FIXME: The check here attempts to predict whether a load or store will 5902 // be vectorized. We only know this for certain after a VF has 5903 // been selected. Here, we assume that if an access can be 5904 // vectorized, it will be. We should also look at extending this 5905 // optimization to non-pointer types. 
5906 // 5907 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 5908 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 5909 continue; 5910 5911 MinWidth = std::min(MinWidth, 5912 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5913 MaxWidth = std::max(MaxWidth, 5914 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5915 } 5916 } 5917 5918 return {MinWidth, MaxWidth}; 5919 } 5920 5921 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5922 unsigned LoopCost) { 5923 // -- The interleave heuristics -- 5924 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5925 // There are many micro-architectural considerations that we can't predict 5926 // at this level. For example, frontend pressure (on decode or fetch) due to 5927 // code size, or the number and capabilities of the execution ports. 5928 // 5929 // We use the following heuristics to select the interleave count: 5930 // 1. If the code has reductions, then we interleave to break the cross 5931 // iteration dependency. 5932 // 2. If the loop is really small, then we interleave to reduce the loop 5933 // overhead. 5934 // 3. We don't interleave if we think that we will spill registers to memory 5935 // due to the increased register pressure. 5936 5937 if (!isScalarEpilogueAllowed()) 5938 return 1; 5939 5940 // We used the distance for the interleave count. 5941 if (Legal->getMaxSafeDepDistBytes() != -1U) 5942 return 1; 5943 5944 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5945 const bool HasReductions = !Legal->getReductionVars().empty(); 5946 // Do not interleave loops with a relatively small known or estimated trip 5947 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 5948 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5949 // because with the above conditions interleaving can expose ILP and break 5950 // cross iteration dependences for reductions. 5951 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5952 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5953 return 1; 5954 5955 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5956 // We divide by these constants so assume that we have at least one 5957 // instruction that uses at least one register. 5958 for (auto& pair : R.MaxLocalUsers) { 5959 pair.second = std::max(pair.second, 1U); 5960 } 5961 5962 // We calculate the interleave count using the following formula. 5963 // Subtract the number of loop invariants from the number of available 5964 // registers. These registers are used by all of the interleaved instances. 5965 // Next, divide the remaining registers by the number of registers that is 5966 // required by the loop, in order to estimate how many parallel instances 5967 // fit without causing spills. All of this is rounded down if necessary to be 5968 // a power of two. We want power of two interleave count to simplify any 5969 // addressing operations or alignment considerations. 5970 // We also want power of two interleave counts to ensure that the induction 5971 // variable of the vector loop wraps to zero, when tail is folded by masking; 5972 // this currently happens when OptForSize, in which case IC is set to 1 above. 
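  // Worked example of the formula described above (hypothetical numbers):
  // with 32 registers in a class, 2 loop-invariant values and a peak of 6
  // live in-loop values of that class,
  //
  //   IC = PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4,
  //
  // i.e. four interleaved copies are expected to fit without spilling. The
  // EnableIndVarRegisterHeur variant below additionally discounts one
  // register for the induction variable.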
5973 unsigned IC = UINT_MAX; 5974 5975 for (auto& pair : R.MaxLocalUsers) { 5976 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5977 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5978 << " registers of " 5979 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5980 if (VF.isScalar()) { 5981 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5982 TargetNumRegisters = ForceTargetNumScalarRegs; 5983 } else { 5984 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5985 TargetNumRegisters = ForceTargetNumVectorRegs; 5986 } 5987 unsigned MaxLocalUsers = pair.second; 5988 unsigned LoopInvariantRegs = 0; 5989 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5990 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5991 5992 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5993 // Don't count the induction variable as interleaved. 5994 if (EnableIndVarRegisterHeur) { 5995 TmpIC = 5996 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5997 std::max(1U, (MaxLocalUsers - 1))); 5998 } 5999 6000 IC = std::min(IC, TmpIC); 6001 } 6002 6003 // Clamp the interleave ranges to reasonable counts. 6004 unsigned MaxInterleaveCount = 6005 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6006 6007 // Check if the user has overridden the max. 6008 if (VF.isScalar()) { 6009 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6010 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6011 } else { 6012 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6013 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6014 } 6015 6016 // If trip count is known or estimated compile time constant, limit the 6017 // interleave count to be less than the trip count divided by VF, provided it 6018 // is at least 1. 6019 // 6020 // For scalable vectors we can't know if interleaving is beneficial. It may 6021 // not be beneficial for small loops if none of the lanes in the second vector 6022 // iterations is enabled. However, for larger loops, there is likely to be a 6023 // similar benefit as for fixed-width vectors. For now, we choose to leave 6024 // the InterleaveCount as if vscale is '1', although if some information about 6025 // the vector is known (e.g. min vector size), we can make a better decision. 6026 if (BestKnownTC) { 6027 MaxInterleaveCount = 6028 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6029 // Make sure MaxInterleaveCount is greater than 0. 6030 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6031 } 6032 6033 assert(MaxInterleaveCount > 0 && 6034 "Maximum interleave count must be greater than 0"); 6035 6036 // Clamp the calculated IC to be between the 1 and the max interleave count 6037 // that the target and trip count allows. 6038 if (IC > MaxInterleaveCount) 6039 IC = MaxInterleaveCount; 6040 else 6041 // Make sure IC is greater than 0. 6042 IC = std::max(1u, IC); 6043 6044 assert(IC > 0 && "Interleave count must be greater than 0."); 6045 6046 // If we did not calculate the cost for VF (because the user selected the VF) 6047 // then we calculate the cost of VF here. 6048 if (LoopCost == 0) { 6049 assert(expectedCost(VF).first.isValid() && "Expected a valid cost"); 6050 LoopCost = *expectedCost(VF).first.getValue(); 6051 } 6052 6053 assert(LoopCost && "Non-zero loop cost expected"); 6054 6055 // Interleave if we vectorized this loop and there is a reduction that could 6056 // benefit from interleaving. 
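  // Worked example of the trip-count clamp above (hypothetical numbers): with
  // an estimated trip count of 24, VF = 8 and a target maximum of 8,
  //   MaxInterleaveCount = std::max(1u, std::min(24u / 8u, 8u)) = 3,
  // and the computed IC was then limited to the range [1, 3] by the clamp
  // above.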
6057 if (VF.isVector() && HasReductions) { 6058 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6059 return IC; 6060 } 6061 6062 // Note that if we've already vectorized the loop we will have done the 6063 // runtime check and so interleaving won't require further checks. 6064 bool InterleavingRequiresRuntimePointerCheck = 6065 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6066 6067 // We want to interleave small loops in order to reduce the loop overhead and 6068 // potentially expose ILP opportunities. 6069 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6070 << "LV: IC is " << IC << '\n' 6071 << "LV: VF is " << VF << '\n'); 6072 const bool AggressivelyInterleaveReductions = 6073 TTI.enableAggressiveInterleaving(HasReductions); 6074 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6075 // We assume that the cost overhead is 1 and we use the cost model 6076 // to estimate the cost of the loop and interleave until the cost of the 6077 // loop overhead is about 5% of the cost of the loop. 6078 unsigned SmallIC = 6079 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6080 6081 // Interleave until store/load ports (estimated by max interleave count) are 6082 // saturated. 6083 unsigned NumStores = Legal->getNumStores(); 6084 unsigned NumLoads = Legal->getNumLoads(); 6085 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6086 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6087 6088 // If we have a scalar reduction (vector reductions are already dealt with 6089 // by this point), we can increase the critical path length if the loop 6090 // we're interleaving is inside another loop. Limit, by default to 2, so the 6091 // critical path only gets increased by one reduction operation. 6092 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6093 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6094 SmallIC = std::min(SmallIC, F); 6095 StoresIC = std::min(StoresIC, F); 6096 LoadsIC = std::min(LoadsIC, F); 6097 } 6098 6099 if (EnableLoadStoreRuntimeInterleave && 6100 std::max(StoresIC, LoadsIC) > SmallIC) { 6101 LLVM_DEBUG( 6102 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6103 return std::max(StoresIC, LoadsIC); 6104 } 6105 6106 // If there are scalar reductions and TTI has enabled aggressive 6107 // interleaving for reductions, we will interleave to expose ILP. 6108 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6109 AggressivelyInterleaveReductions) { 6110 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6111 // Interleave no less than SmallIC but not as aggressive as the normal IC 6112 // to satisfy the rare situation when resources are too limited. 6113 return std::max(IC / 2, SmallIC); 6114 } else { 6115 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6116 return SmallIC; 6117 } 6118 } 6119 6120 // Interleave if this is a large loop (small loops are already dealt with by 6121 // this point) that could benefit from interleaving. 6122 if (AggressivelyInterleaveReductions) { 6123 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6124 return IC; 6125 } 6126 6127 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6128 return 1; 6129 } 6130 6131 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6132 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6133 // This function calculates the register usage by measuring the highest number 6134 // of values that are alive at a single location. 
Obviously, this is a very 6135 // rough estimation. We scan the loop in a topological order in order and 6136 // assign a number to each instruction. We use RPO to ensure that defs are 6137 // met before their users. We assume that each instruction that has in-loop 6138 // users starts an interval. We record every time that an in-loop value is 6139 // used, so we have a list of the first and last occurrences of each 6140 // instruction. Next, we transpose this data structure into a multi map that 6141 // holds the list of intervals that *end* at a specific location. This multi 6142 // map allows us to perform a linear search. We scan the instructions linearly 6143 // and record each time that a new interval starts, by placing it in a set. 6144 // If we find this value in the multi-map then we remove it from the set. 6145 // The max register usage is the maximum size of the set. 6146 // We also search for instructions that are defined outside the loop, but are 6147 // used inside the loop. We need this number separately from the max-interval 6148 // usage number because when we unroll, loop-invariant values do not take 6149 // more register. 6150 LoopBlocksDFS DFS(TheLoop); 6151 DFS.perform(LI); 6152 6153 RegisterUsage RU; 6154 6155 // Each 'key' in the map opens a new interval. The values 6156 // of the map are the index of the 'last seen' usage of the 6157 // instruction that is the key. 6158 using IntervalMap = DenseMap<Instruction *, unsigned>; 6159 6160 // Maps instruction to its index. 6161 SmallVector<Instruction *, 64> IdxToInstr; 6162 // Marks the end of each interval. 6163 IntervalMap EndPoint; 6164 // Saves the list of instruction indices that are used in the loop. 6165 SmallPtrSet<Instruction *, 8> Ends; 6166 // Saves the list of values that are used in the loop but are 6167 // defined outside the loop, such as arguments and constants. 6168 SmallPtrSet<Value *, 8> LoopInvariants; 6169 6170 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6171 for (Instruction &I : BB->instructionsWithoutDebug()) { 6172 IdxToInstr.push_back(&I); 6173 6174 // Save the end location of each USE. 6175 for (Value *U : I.operands()) { 6176 auto *Instr = dyn_cast<Instruction>(U); 6177 6178 // Ignore non-instruction values such as arguments, constants, etc. 6179 if (!Instr) 6180 continue; 6181 6182 // If this instruction is outside the loop then record it and continue. 6183 if (!TheLoop->contains(Instr)) { 6184 LoopInvariants.insert(Instr); 6185 continue; 6186 } 6187 6188 // Overwrite previous end points. 6189 EndPoint[Instr] = IdxToInstr.size(); 6190 Ends.insert(Instr); 6191 } 6192 } 6193 } 6194 6195 // Saves the list of intervals that end with the index in 'key'. 6196 using InstrList = SmallVector<Instruction *, 2>; 6197 DenseMap<unsigned, InstrList> TransposeEnds; 6198 6199 // Transpose the EndPoints to a list of values that end at each index. 6200 for (auto &Interval : EndPoint) 6201 TransposeEnds[Interval.second].push_back(Interval.first); 6202 6203 SmallPtrSet<Instruction *, 8> OpenIntervals; 6204 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6205 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6206 6207 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6208 6209 // A lambda that gets the register usage for the given type and VF. 
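  // Illustrative example for the register-usage query below (hypothetical
  // target with 128-bit vector registers): an i64 value at VF = 8 is costed
  // as <8 x i64>, i.e. 512 bits, which such a target would report as 4
  // registers, so one live value of that type adds 4 to the pressure at its
  // program point.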
6210 const auto &TTICapture = TTI; 6211 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) { 6212 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6213 return 0U; 6214 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF)); 6215 }; 6216 6217 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6218 Instruction *I = IdxToInstr[i]; 6219 6220 // Remove all of the instructions that end at this location. 6221 InstrList &List = TransposeEnds[i]; 6222 for (Instruction *ToRemove : List) 6223 OpenIntervals.erase(ToRemove); 6224 6225 // Ignore instructions that are never used within the loop. 6226 if (!Ends.count(I)) 6227 continue; 6228 6229 // Skip ignored values. 6230 if (ValuesToIgnore.count(I)) 6231 continue; 6232 6233 // For each VF find the maximum usage of registers. 6234 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6235 // Count the number of live intervals. 6236 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6237 6238 if (VFs[j].isScalar()) { 6239 for (auto Inst : OpenIntervals) { 6240 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6241 if (RegUsage.find(ClassID) == RegUsage.end()) 6242 RegUsage[ClassID] = 1; 6243 else 6244 RegUsage[ClassID] += 1; 6245 } 6246 } else { 6247 collectUniformsAndScalars(VFs[j]); 6248 for (auto Inst : OpenIntervals) { 6249 // Skip ignored values for VF > 1. 6250 if (VecValuesToIgnore.count(Inst)) 6251 continue; 6252 if (isScalarAfterVectorization(Inst, VFs[j])) { 6253 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6254 if (RegUsage.find(ClassID) == RegUsage.end()) 6255 RegUsage[ClassID] = 1; 6256 else 6257 RegUsage[ClassID] += 1; 6258 } else { 6259 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6260 if (RegUsage.find(ClassID) == RegUsage.end()) 6261 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6262 else 6263 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6264 } 6265 } 6266 } 6267 6268 for (auto& pair : RegUsage) { 6269 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6270 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6271 else 6272 MaxUsages[j][pair.first] = pair.second; 6273 } 6274 } 6275 6276 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6277 << OpenIntervals.size() << '\n'); 6278 6279 // Add the current instruction to the list of open intervals. 6280 OpenIntervals.insert(I); 6281 } 6282 6283 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6284 SmallMapVector<unsigned, unsigned, 4> Invariant; 6285 6286 for (auto Inst : LoopInvariants) { 6287 unsigned Usage = 6288 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6289 unsigned ClassID = 6290 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6291 if (Invariant.find(ClassID) == Invariant.end()) 6292 Invariant[ClassID] = Usage; 6293 else 6294 Invariant[ClassID] += Usage; 6295 } 6296 6297 LLVM_DEBUG({ 6298 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6299 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6300 << " item\n"; 6301 for (const auto &pair : MaxUsages[i]) { 6302 dbgs() << "LV(REG): RegisterClass: " 6303 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6304 << " registers\n"; 6305 } 6306 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6307 << " item\n"; 6308 for (const auto &pair : Invariant) { 6309 dbgs() << "LV(REG): RegisterClass: " 6310 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6311 << " registers\n"; 6312 } 6313 }); 6314 6315 RU.LoopInvariantRegs = Invariant; 6316 RU.MaxLocalUsers = MaxUsages[i]; 6317 RUs[i] = RU; 6318 } 6319 6320 return RUs; 6321 } 6322 6323 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6324 // TODO: Cost model for emulated masked load/store is completely 6325 // broken. This hack guides the cost model to use an artificially 6326 // high enough value to practically disable vectorization with such 6327 // operations, except where previously deployed legality hack allowed 6328 // using very low cost values. This is to avoid regressions coming simply 6329 // from moving "masked load/store" check from legality to cost model. 6330 // Masked Load/Gather emulation was previously never allowed. 6331 // Limited number of Masked Store/Scatter emulation was allowed. 6332 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 6333 return isa<LoadInst>(I) || 6334 (isa<StoreInst>(I) && 6335 NumPredStores > NumberOfStoresToPredicate); 6336 } 6337 6338 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6339 // If we aren't vectorizing the loop, or if we've already collected the 6340 // instructions to scalarize, there's nothing to do. Collection may already 6341 // have occurred if we have a user-selected VF and are now computing the 6342 // expected cost for interleaving. 6343 if (VF.isScalar() || VF.isZero() || 6344 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6345 return; 6346 6347 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6348 // not profitable to scalarize any instructions, the presence of VF in the 6349 // map will indicate that we've analyzed it already. 6350 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6351 6352 // Find all the instructions that are scalar with predication in the loop and 6353 // determine if it would be better to not if-convert the blocks they are in. 6354 // If so, we also record the instructions to scalarize. 6355 for (BasicBlock *BB : TheLoop->blocks()) { 6356 if (!blockNeedsPredication(BB)) 6357 continue; 6358 for (Instruction &I : *BB) 6359 if (isScalarWithPredication(&I)) { 6360 ScalarCostsTy ScalarCosts; 6361 // Do not apply discount logic if hacked cost is needed 6362 // for emulated masked memrefs. 6363 if (!useEmulatedMaskMemRefHack(&I) && 6364 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6365 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6366 // Remember that BB will remain after vectorization. 
6367 PredicatedBBsAfterVectorization.insert(BB); 6368 } 6369 } 6370 } 6371 6372 int LoopVectorizationCostModel::computePredInstDiscount( 6373 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6374 assert(!isUniformAfterVectorization(PredInst, VF) && 6375 "Instruction marked uniform-after-vectorization will be predicated"); 6376 6377 // Initialize the discount to zero, meaning that the scalar version and the 6378 // vector version cost the same. 6379 InstructionCost Discount = 0; 6380 6381 // Holds instructions to analyze. The instructions we visit are mapped in 6382 // ScalarCosts. Those instructions are the ones that would be scalarized if 6383 // we find that the scalar version costs less. 6384 SmallVector<Instruction *, 8> Worklist; 6385 6386 // Returns true if the given instruction can be scalarized. 6387 auto canBeScalarized = [&](Instruction *I) -> bool { 6388 // We only attempt to scalarize instructions forming a single-use chain 6389 // from the original predicated block that would otherwise be vectorized. 6390 // Although not strictly necessary, we give up on instructions we know will 6391 // already be scalar to avoid traversing chains that are unlikely to be 6392 // beneficial. 6393 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6394 isScalarAfterVectorization(I, VF)) 6395 return false; 6396 6397 // If the instruction is scalar with predication, it will be analyzed 6398 // separately. We ignore it within the context of PredInst. 6399 if (isScalarWithPredication(I)) 6400 return false; 6401 6402 // If any of the instruction's operands are uniform after vectorization, 6403 // the instruction cannot be scalarized. This prevents, for example, a 6404 // masked load from being scalarized. 6405 // 6406 // We assume we will only emit a value for lane zero of an instruction 6407 // marked uniform after vectorization, rather than VF identical values. 6408 // Thus, if we scalarize an instruction that uses a uniform, we would 6409 // create uses of values corresponding to the lanes we aren't emitting code 6410 // for. This behavior can be changed by allowing getScalarValue to clone 6411 // the lane zero values for uniforms rather than asserting. 6412 for (Use &U : I->operands()) 6413 if (auto *J = dyn_cast<Instruction>(U.get())) 6414 if (isUniformAfterVectorization(J, VF)) 6415 return false; 6416 6417 // Otherwise, we can scalarize the instruction. 6418 return true; 6419 }; 6420 6421 // Compute the expected cost discount from scalarizing the entire expression 6422 // feeding the predicated instruction. We currently only consider expressions 6423 // that are single-use instruction chains. 6424 Worklist.push_back(PredInst); 6425 while (!Worklist.empty()) { 6426 Instruction *I = Worklist.pop_back_val(); 6427 6428 // If we've already analyzed the instruction, there's nothing to do. 6429 if (ScalarCosts.find(I) != ScalarCosts.end()) 6430 continue; 6431 6432 // Compute the cost of the vector instruction. Note that this cost already 6433 // includes the scalarization overhead of the predicated instruction. 6434 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6435 6436 // Compute the cost of the scalarized instruction. This cost is the cost of 6437 // the instruction as if it wasn't if-converted and instead remained in the 6438 // predicated block. We will scale this cost by block probability after 6439 // computing the scalarization overhead. 
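  // Worked example of the discount bookkeeping (hypothetical costs, ignoring
  // the insert/extract overhead terms and assuming the usual 50% block
  // probability, i.e. getReciprocalPredBlockProb() == 2): at VF = 4, if the
  // predicated vector form of I costs 12 and one scalar copy costs 2, then
  // ScalarCost = 4 * 2 = 8, scaled to 8 / 2 = 4, and the running Discount
  // grows by 12 - 4 = 8, so scalarizing this chain looks beneficial.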
6440 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6441 InstructionCost ScalarCost = 6442 VF.getKnownMinValue() * 6443 getInstructionCost(I, ElementCount::getFixed(1)).first; 6444 6445 // Compute the scalarization overhead of needed insertelement instructions 6446 // and phi nodes. 6447 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6448 ScalarCost += TTI.getScalarizationOverhead( 6449 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6450 APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); 6451 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6452 ScalarCost += 6453 VF.getKnownMinValue() * 6454 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6455 } 6456 6457 // Compute the scalarization overhead of needed extractelement 6458 // instructions. For each of the instruction's operands, if the operand can 6459 // be scalarized, add it to the worklist; otherwise, account for the 6460 // overhead. 6461 for (Use &U : I->operands()) 6462 if (auto *J = dyn_cast<Instruction>(U.get())) { 6463 assert(VectorType::isValidElementType(J->getType()) && 6464 "Instruction has non-scalar type"); 6465 if (canBeScalarized(J)) 6466 Worklist.push_back(J); 6467 else if (needsExtract(J, VF)) { 6468 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6469 ScalarCost += TTI.getScalarizationOverhead( 6470 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6471 APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); 6472 } 6473 } 6474 6475 // Scale the total scalar cost by block probability. 6476 ScalarCost /= getReciprocalPredBlockProb(); 6477 6478 // Compute the discount. A non-negative discount means the vector version 6479 // of the instruction costs more, and scalarizing would be beneficial. 6480 Discount += VectorCost - ScalarCost; 6481 ScalarCosts[I] = ScalarCost; 6482 } 6483 6484 return *Discount.getValue(); 6485 } 6486 6487 LoopVectorizationCostModel::VectorizationCostTy 6488 LoopVectorizationCostModel::expectedCost(ElementCount VF) { 6489 VectorizationCostTy Cost; 6490 6491 // For each block. 6492 for (BasicBlock *BB : TheLoop->blocks()) { 6493 VectorizationCostTy BlockCost; 6494 6495 // For each instruction in the old loop. 6496 for (Instruction &I : BB->instructionsWithoutDebug()) { 6497 // Skip ignored values. 6498 if (ValuesToIgnore.count(&I) || 6499 (VF.isVector() && VecValuesToIgnore.count(&I))) 6500 continue; 6501 6502 VectorizationCostTy C = getInstructionCost(&I, VF); 6503 6504 // Check if we should override the cost. 6505 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6506 C.first = InstructionCost(ForceTargetInstructionCost); 6507 6508 BlockCost.first += C.first; 6509 BlockCost.second |= C.second; 6510 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6511 << " for VF " << VF << " For instruction: " << I 6512 << '\n'); 6513 } 6514 6515 // If we are vectorizing a predicated block, it will have been 6516 // if-converted. This means that the block's instructions (aside from 6517 // stores and instructions that may divide by zero) will now be 6518 // unconditionally executed. For the scalar case, we may not always execute 6519 // the predicated block, if it is an if-else block. Thus, scale the block's 6520 // cost by the probability of executing it. blockNeedsPredication from 6521 // Legal is used so as to not include all blocks in tail folded loops. 
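  // Worked example (assuming the usual 50% block-execution heuristic, i.e.
  // getReciprocalPredBlockProb() == 2): a predicated block whose instructions
  // sum to a scalar cost of 10 contributes 10 / 2 = 5 to the loop cost.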
6522 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6523 BlockCost.first /= getReciprocalPredBlockProb(); 6524 6525 Cost.first += BlockCost.first; 6526 Cost.second |= BlockCost.second; 6527 } 6528 6529 return Cost; 6530 } 6531 6532 /// Gets Address Access SCEV after verifying that the access pattern 6533 /// is loop invariant except the induction variable dependence. 6534 /// 6535 /// This SCEV can be sent to the Target in order to estimate the address 6536 /// calculation cost. 6537 static const SCEV *getAddressAccessSCEV( 6538 Value *Ptr, 6539 LoopVectorizationLegality *Legal, 6540 PredicatedScalarEvolution &PSE, 6541 const Loop *TheLoop) { 6542 6543 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6544 if (!Gep) 6545 return nullptr; 6546 6547 // We are looking for a gep with all loop invariant indices except for one 6548 // which should be an induction variable. 6549 auto SE = PSE.getSE(); 6550 unsigned NumOperands = Gep->getNumOperands(); 6551 for (unsigned i = 1; i < NumOperands; ++i) { 6552 Value *Opd = Gep->getOperand(i); 6553 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6554 !Legal->isInductionVariable(Opd)) 6555 return nullptr; 6556 } 6557 6558 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6559 return PSE.getSCEV(Ptr); 6560 } 6561 6562 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6563 return Legal->hasStride(I->getOperand(0)) || 6564 Legal->hasStride(I->getOperand(1)); 6565 } 6566 6567 InstructionCost 6568 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6569 ElementCount VF) { 6570 assert(VF.isVector() && 6571 "Scalarization cost of instruction implies vectorization."); 6572 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6573 Type *ValTy = getMemInstValueType(I); 6574 auto SE = PSE.getSE(); 6575 6576 unsigned AS = getLoadStoreAddressSpace(I); 6577 Value *Ptr = getLoadStorePointerOperand(I); 6578 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6579 6580 // Figure out whether the access is strided and get the stride value 6581 // if it's known in compile time 6582 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6583 6584 // Get the cost of the scalar memory instruction and address computation. 6585 InstructionCost Cost = 6586 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6587 6588 // Don't pass *I here, since it is scalar but will actually be part of a 6589 // vectorized loop where the user of it is a vectorized instruction. 6590 const Align Alignment = getLoadStoreAlignment(I); 6591 Cost += VF.getKnownMinValue() * 6592 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6593 AS, TTI::TCK_RecipThroughput); 6594 6595 // Get the overhead of the extractelement and insertelement instructions 6596 // we might create due to scalarization. 6597 Cost += getScalarizationOverhead(I, VF); 6598 6599 // If we have a predicated store, it may not be executed for each vector 6600 // lane. Scale the cost by the probability of executing the predicated 6601 // block. 6602 if (isPredicatedInst(I)) { 6603 Cost /= getReciprocalPredBlockProb(); 6604 6605 if (useEmulatedMaskMemRefHack(I)) 6606 // Artificially setting to a high enough value to practically disable 6607 // vectorization with such operations. 
6608 Cost = 3000000; 6609 } 6610 6611 return Cost; 6612 } 6613 6614 InstructionCost 6615 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6616 ElementCount VF) { 6617 Type *ValTy = getMemInstValueType(I); 6618 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6619 Value *Ptr = getLoadStorePointerOperand(I); 6620 unsigned AS = getLoadStoreAddressSpace(I); 6621 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6622 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6623 6624 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6625 "Stride should be 1 or -1 for consecutive memory access"); 6626 const Align Alignment = getLoadStoreAlignment(I); 6627 InstructionCost Cost = 0; 6628 if (Legal->isMaskRequired(I)) 6629 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6630 CostKind); 6631 else 6632 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6633 CostKind, I); 6634 6635 bool Reverse = ConsecutiveStride < 0; 6636 if (Reverse) 6637 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6638 return Cost; 6639 } 6640 6641 InstructionCost 6642 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6643 ElementCount VF) { 6644 assert(Legal->isUniformMemOp(*I)); 6645 6646 Type *ValTy = getMemInstValueType(I); 6647 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6648 const Align Alignment = getLoadStoreAlignment(I); 6649 unsigned AS = getLoadStoreAddressSpace(I); 6650 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6651 if (isa<LoadInst>(I)) { 6652 return TTI.getAddressComputationCost(ValTy) + 6653 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6654 CostKind) + 6655 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6656 } 6657 StoreInst *SI = cast<StoreInst>(I); 6658 6659 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6660 return TTI.getAddressComputationCost(ValTy) + 6661 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6662 CostKind) + 6663 (isLoopInvariantStoreValue 6664 ? 0 6665 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6666 VF.getKnownMinValue() - 1)); 6667 } 6668 6669 InstructionCost 6670 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6671 ElementCount VF) { 6672 Type *ValTy = getMemInstValueType(I); 6673 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6674 const Align Alignment = getLoadStoreAlignment(I); 6675 const Value *Ptr = getLoadStorePointerOperand(I); 6676 6677 return TTI.getAddressComputationCost(VectorTy) + 6678 TTI.getGatherScatterOpCost( 6679 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6680 TargetTransformInfo::TCK_RecipThroughput, I); 6681 } 6682 6683 InstructionCost 6684 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6685 ElementCount VF) { 6686 // TODO: Once we have support for interleaving with scalable vectors 6687 // we can calculate the cost properly here. 
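  // Illustrative note for the group cost computed below (hypothetical
  // numbers): a load group with factor 3 at VF = 4 is costed as one wide
  // <12 x elt> access via getInterleavedMemoryOpCost, plus the shuffles
  // needed to split out the members; members missing from a load group are
  // simply left out of Indices.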
6688 if (VF.isScalable()) 6689 return InstructionCost::getInvalid(); 6690 6691 Type *ValTy = getMemInstValueType(I); 6692 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6693 unsigned AS = getLoadStoreAddressSpace(I); 6694 6695 auto Group = getInterleavedAccessGroup(I); 6696 assert(Group && "Fail to get an interleaved access group."); 6697 6698 unsigned InterleaveFactor = Group->getFactor(); 6699 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6700 6701 // Holds the indices of existing members in an interleaved load group. 6702 // An interleaved store group doesn't need this as it doesn't allow gaps. 6703 SmallVector<unsigned, 4> Indices; 6704 if (isa<LoadInst>(I)) { 6705 for (unsigned i = 0; i < InterleaveFactor; i++) 6706 if (Group->getMember(i)) 6707 Indices.push_back(i); 6708 } 6709 6710 // Calculate the cost of the whole interleaved group. 6711 bool UseMaskForGaps = 6712 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 6713 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6714 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6715 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6716 6717 if (Group->isReverse()) { 6718 // TODO: Add support for reversed masked interleaved access. 6719 assert(!Legal->isMaskRequired(I) && 6720 "Reverse masked interleaved access not supported."); 6721 Cost += Group->getNumMembers() * 6722 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6723 } 6724 return Cost; 6725 } 6726 6727 InstructionCost LoopVectorizationCostModel::getReductionPatternCost( 6728 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6729 // Early exit for no inloop reductions 6730 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6731 return InstructionCost::getInvalid(); 6732 auto *VectorTy = cast<VectorType>(Ty); 6733 6734 // We are looking for a pattern of, and finding the minimal acceptable cost: 6735 // reduce(mul(ext(A), ext(B))) or 6736 // reduce(mul(A, B)) or 6737 // reduce(ext(A)) or 6738 // reduce(A). 6739 // The basic idea is that we walk down the tree to do that, finding the root 6740 // reduction instruction in InLoopReductionImmediateChains. From there we find 6741 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6742 // of the components. If the reduction cost is lower then we return it for the 6743 // reduction instruction and 0 for the other instructions in the pattern. If 6744 // it is not we return an invalid cost specifying the orignal cost method 6745 // should be used. 6746 Instruction *RetI = I; 6747 if ((RetI->getOpcode() == Instruction::SExt || 6748 RetI->getOpcode() == Instruction::ZExt)) { 6749 if (!RetI->hasOneUser()) 6750 return InstructionCost::getInvalid(); 6751 RetI = RetI->user_back(); 6752 } 6753 if (RetI->getOpcode() == Instruction::Mul && 6754 RetI->user_back()->getOpcode() == Instruction::Add) { 6755 if (!RetI->hasOneUser()) 6756 return InstructionCost::getInvalid(); 6757 RetI = RetI->user_back(); 6758 } 6759 6760 // Test if the found instruction is a reduction, and if not return an invalid 6761 // cost specifying the parent to use the original cost modelling. 6762 if (!InLoopReductionImmediateChains.count(RetI)) 6763 return InstructionCost::getInvalid(); 6764 6765 // Find the reduction this chain is a part of and calculate the basic cost of 6766 // the reduction on its own. 
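  // Illustrative example of the pattern walk above (hypothetical IR, assuming
  // the add is a recognised in-loop reduction):
  //
  //   %a.ext = sext i8 %a to i32
  //   %b.ext = sext i8 %b to i32
  //   %mul   = mul nsw i32 %a.ext, %b.ext
  //   %sum   = add i32 %acc, %mul
  //
  // Called on %a.ext, RetI advances to %mul and then to %sum, and the
  // MLA-style extended reduction cost is compared against the plain reduction
  // plus the two extends and the multiply; if it wins, the whole pattern's
  // cost is attributed to %sum and the ext/mul report a cost of 0.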
6767 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6768 Instruction *ReductionPhi = LastChain; 6769 while (!isa<PHINode>(ReductionPhi)) 6770 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6771 6772 RecurrenceDescriptor RdxDesc = 6773 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 6774 unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), 6775 VectorTy, false, CostKind); 6776 6777 // Get the operand that was not the reduction chain and match it to one of the 6778 // patterns, returning the better cost if it is found. 6779 Instruction *RedOp = RetI->getOperand(1) == LastChain 6780 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6781 : dyn_cast<Instruction>(RetI->getOperand(1)); 6782 6783 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6784 6785 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 6786 !TheLoop->isLoopInvariant(RedOp)) { 6787 bool IsUnsigned = isa<ZExtInst>(RedOp); 6788 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6789 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6790 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6791 CostKind); 6792 6793 unsigned ExtCost = 6794 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6795 TTI::CastContextHint::None, CostKind, RedOp); 6796 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6797 return I == RetI ? *RedCost.getValue() : 0; 6798 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 6799 Instruction *Mul = RedOp; 6800 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 6801 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 6802 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 6803 Op0->getOpcode() == Op1->getOpcode() && 6804 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6805 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6806 bool IsUnsigned = isa<ZExtInst>(Op0); 6807 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6808 // reduce(mul(ext, ext)) 6809 unsigned ExtCost = 6810 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 6811 TTI::CastContextHint::None, CostKind, Op0); 6812 unsigned MulCost = 6813 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6814 6815 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6816 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6817 CostKind); 6818 6819 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 6820 return I == RetI ? *RedCost.getValue() : 0; 6821 } else { 6822 unsigned MulCost = 6823 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6824 6825 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6826 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6827 CostKind); 6828 6829 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6830 return I == RetI ? *RedCost.getValue() : 0; 6831 } 6832 } 6833 6834 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 6835 } 6836 6837 InstructionCost 6838 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6839 ElementCount VF) { 6840 // Calculate scalar cost only. Vectorization cost should be ready at this 6841 // moment. 
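  // Illustrative breakdown of the scalar path below (hypothetical costs): a
  // plain scalar load is costed as getAddressComputationCost(ValTy) plus
  // getMemoryOpCost(Load, ValTy, ...), e.g. 0 + 1 on a simple target, while
  // vector VFs simply return the widening cost cached by
  // setCostBasedWideningDecision().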
6842 if (VF.isScalar()) { 6843 Type *ValTy = getMemInstValueType(I); 6844 const Align Alignment = getLoadStoreAlignment(I); 6845 unsigned AS = getLoadStoreAddressSpace(I); 6846 6847 return TTI.getAddressComputationCost(ValTy) + 6848 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6849 TTI::TCK_RecipThroughput, I); 6850 } 6851 return getWideningCost(I, VF); 6852 } 6853 6854 LoopVectorizationCostModel::VectorizationCostTy 6855 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6856 ElementCount VF) { 6857 // If we know that this instruction will remain uniform, check the cost of 6858 // the scalar version. 6859 if (isUniformAfterVectorization(I, VF)) 6860 VF = ElementCount::getFixed(1); 6861 6862 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6863 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6864 6865 // Forced scalars do not have any scalarization overhead. 6866 auto ForcedScalar = ForcedScalars.find(VF); 6867 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6868 auto InstSet = ForcedScalar->second; 6869 if (InstSet.count(I)) 6870 return VectorizationCostTy( 6871 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6872 VF.getKnownMinValue()), 6873 false); 6874 } 6875 6876 Type *VectorTy; 6877 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6878 6879 bool TypeNotScalarized = 6880 VF.isVector() && VectorTy->isVectorTy() && 6881 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 6882 return VectorizationCostTy(C, TypeNotScalarized); 6883 } 6884 6885 InstructionCost 6886 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6887 ElementCount VF) { 6888 6889 if (VF.isScalable()) 6890 return InstructionCost::getInvalid(); 6891 6892 if (VF.isScalar()) 6893 return 0; 6894 6895 InstructionCost Cost = 0; 6896 Type *RetTy = ToVectorTy(I->getType(), VF); 6897 if (!RetTy->isVoidTy() && 6898 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6899 Cost += TTI.getScalarizationOverhead( 6900 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 6901 true, false); 6902 6903 // Some targets keep addresses scalar. 6904 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6905 return Cost; 6906 6907 // Some targets support efficient element stores. 6908 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6909 return Cost; 6910 6911 // Collect operands to consider. 6912 CallInst *CI = dyn_cast<CallInst>(I); 6913 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 6914 6915 // Skip operands that do not require extraction/scalarization and do not incur 6916 // any overhead. 6917 SmallVector<Type *> Tys; 6918 for (auto *V : filterExtractingOperands(Ops, VF)) 6919 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6920 return Cost + TTI.getOperandsScalarizationOverhead( 6921 filterExtractingOperands(Ops, VF), Tys); 6922 } 6923 6924 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6925 if (VF.isScalar()) 6926 return; 6927 NumPredStores = 0; 6928 for (BasicBlock *BB : TheLoop->blocks()) { 6929 // For each instruction in the old loop. 6930 for (Instruction &I : *BB) { 6931 Value *Ptr = getLoadStorePointerOperand(&I); 6932 if (!Ptr) 6933 continue; 6934 6935 // TODO: We should generate better code and update the cost model for 6936 // predicated uniform stores. Today they are treated as any other 6937 // predicated store (see added test cases in 6938 // invariant-store-vectorization.ll). 
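      // Until then, a predicated uniform store is simply counted among the
      // ordinary predicated stores below.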
6939 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6940 NumPredStores++; 6941 6942 if (Legal->isUniformMemOp(I)) { 6943 // TODO: Avoid replicating loads and stores instead of 6944 // relying on instcombine to remove them. 6945 // Load: Scalar load + broadcast 6946 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6947 InstructionCost Cost = getUniformMemOpCost(&I, VF); 6948 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6949 continue; 6950 } 6951 6952 // We assume that widening is the best solution when possible. 6953 if (memoryInstructionCanBeWidened(&I, VF)) { 6954 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6955 int ConsecutiveStride = 6956 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 6957 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6958 "Expected consecutive stride."); 6959 InstWidening Decision = 6960 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6961 setWideningDecision(&I, VF, Decision, Cost); 6962 continue; 6963 } 6964 6965 // Choose between Interleaving, Gather/Scatter or Scalarization. 6966 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6967 unsigned NumAccesses = 1; 6968 if (isAccessInterleaved(&I)) { 6969 auto Group = getInterleavedAccessGroup(&I); 6970 assert(Group && "Fail to get an interleaved access group."); 6971 6972 // Make one decision for the whole group. 6973 if (getWideningDecision(&I, VF) != CM_Unknown) 6974 continue; 6975 6976 NumAccesses = Group->getNumMembers(); 6977 if (interleavedAccessCanBeWidened(&I, VF)) 6978 InterleaveCost = getInterleaveGroupCost(&I, VF); 6979 } 6980 6981 InstructionCost GatherScatterCost = 6982 isLegalGatherOrScatter(&I) 6983 ? getGatherScatterCost(&I, VF) * NumAccesses 6984 : InstructionCost::getInvalid(); 6985 6986 InstructionCost ScalarizationCost = 6987 !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses 6988 : InstructionCost::getInvalid(); 6989 6990 // Choose better solution for the current VF, 6991 // write down this decision and use it during vectorization. 6992 InstructionCost Cost; 6993 InstWidening Decision; 6994 if (InterleaveCost <= GatherScatterCost && 6995 InterleaveCost < ScalarizationCost) { 6996 Decision = CM_Interleave; 6997 Cost = InterleaveCost; 6998 } else if (GatherScatterCost < ScalarizationCost) { 6999 Decision = CM_GatherScatter; 7000 Cost = GatherScatterCost; 7001 } else { 7002 assert(!VF.isScalable() && 7003 "We cannot yet scalarise for scalable vectors"); 7004 Decision = CM_Scalarize; 7005 Cost = ScalarizationCost; 7006 } 7007 // If the instructions belongs to an interleave group, the whole group 7008 // receives the same decision. The whole group receives the cost, but 7009 // the cost will actually be assigned to one instruction. 7010 if (auto Group = getInterleavedAccessGroup(&I)) 7011 setWideningDecision(Group, VF, Decision, Cost); 7012 else 7013 setWideningDecision(&I, VF, Decision, Cost); 7014 } 7015 } 7016 7017 // Make sure that any load of address and any other address computation 7018 // remains scalar unless there is gather/scatter support. This avoids 7019 // inevitable extracts into address registers, and also has the benefit of 7020 // activating LSR more, since that pass can't optimize vectorized 7021 // addresses. 7022 if (TTI.prefersVectorizedAddressing()) 7023 return; 7024 7025 // Start with all scalar pointer uses. 
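  // AddrDefs seeds a worklist that is grown transitively below: every in-block,
  // non-PHI operand of an address computation is pulled in as well, so a whole
  // address chain is kept scalar rather than being vectorized piecemeal.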
7026 SmallPtrSet<Instruction *, 8> AddrDefs; 7027 for (BasicBlock *BB : TheLoop->blocks()) 7028 for (Instruction &I : *BB) { 7029 Instruction *PtrDef = 7030 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7031 if (PtrDef && TheLoop->contains(PtrDef) && 7032 getWideningDecision(&I, VF) != CM_GatherScatter) 7033 AddrDefs.insert(PtrDef); 7034 } 7035 7036 // Add all instructions used to generate the addresses. 7037 SmallVector<Instruction *, 4> Worklist; 7038 append_range(Worklist, AddrDefs); 7039 while (!Worklist.empty()) { 7040 Instruction *I = Worklist.pop_back_val(); 7041 for (auto &Op : I->operands()) 7042 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7043 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7044 AddrDefs.insert(InstOp).second) 7045 Worklist.push_back(InstOp); 7046 } 7047 7048 for (auto *I : AddrDefs) { 7049 if (isa<LoadInst>(I)) { 7050 // Setting the desired widening decision should ideally be handled in 7051 // by cost functions, but since this involves the task of finding out 7052 // if the loaded register is involved in an address computation, it is 7053 // instead changed here when we know this is the case. 7054 InstWidening Decision = getWideningDecision(I, VF); 7055 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7056 // Scalarize a widened load of address. 7057 setWideningDecision( 7058 I, VF, CM_Scalarize, 7059 (VF.getKnownMinValue() * 7060 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7061 else if (auto Group = getInterleavedAccessGroup(I)) { 7062 // Scalarize an interleave group of address loads. 7063 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7064 if (Instruction *Member = Group->getMember(I)) 7065 setWideningDecision( 7066 Member, VF, CM_Scalarize, 7067 (VF.getKnownMinValue() * 7068 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7069 } 7070 } 7071 } else 7072 // Make sure I gets scalarized and a cost estimate without 7073 // scalarization overhead. 7074 ForcedScalars[VF].insert(I); 7075 } 7076 } 7077 7078 InstructionCost 7079 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7080 Type *&VectorTy) { 7081 Type *RetTy = I->getType(); 7082 if (canTruncateToMinimalBitwidth(I, VF)) 7083 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7084 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7085 auto SE = PSE.getSE(); 7086 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7087 7088 // TODO: We need to estimate the cost of intrinsic calls. 7089 switch (I->getOpcode()) { 7090 case Instruction::GetElementPtr: 7091 // We mark this instruction as zero-cost because the cost of GEPs in 7092 // vectorized code depends on whether the corresponding memory instruction 7093 // is scalarized or not. Therefore, we handle GEPs with the memory 7094 // instruction cost. 7095 return 0; 7096 case Instruction::Br: { 7097 // In cases of scalarized and predicated instructions, there will be VF 7098 // predicated blocks in the vectorized loop. Each branch around these 7099 // blocks requires also an extract of its vector compare i1 element. 
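    // Roughly: cost = extraction overhead of the <VF x i1> compare mask
    //                 + VF * cost of a scalar branch.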
7100 bool ScalarPredicatedBB = false; 7101 BranchInst *BI = cast<BranchInst>(I); 7102 if (VF.isVector() && BI->isConditional() && 7103 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7104 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7105 ScalarPredicatedBB = true; 7106 7107 if (ScalarPredicatedBB) { 7108 // Return cost for branches around scalarized and predicated blocks. 7109 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7110 auto *Vec_i1Ty = 7111 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7112 return (TTI.getScalarizationOverhead( 7113 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7114 false, true) + 7115 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7116 VF.getKnownMinValue())); 7117 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7118 // The back-edge branch will remain, as will all scalar branches. 7119 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7120 else 7121 // This branch will be eliminated by if-conversion. 7122 return 0; 7123 // Note: We currently assume zero cost for an unconditional branch inside 7124 // a predicated block since it will become a fall-through, although we 7125 // may decide in the future to call TTI for all branches. 7126 } 7127 case Instruction::PHI: { 7128 auto *Phi = cast<PHINode>(I); 7129 7130 // First-order recurrences are replaced by vector shuffles inside the loop. 7131 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7132 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7133 return TTI.getShuffleCost( 7134 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7135 VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7136 7137 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7138 // converted into select instructions. We require N - 1 selects per phi 7139 // node, where N is the number of incoming values. 7140 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7141 return (Phi->getNumIncomingValues() - 1) * 7142 TTI.getCmpSelInstrCost( 7143 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7144 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7145 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7146 7147 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7148 } 7149 case Instruction::UDiv: 7150 case Instruction::SDiv: 7151 case Instruction::URem: 7152 case Instruction::SRem: 7153 // If we have a predicated instruction, it may not be executed for each 7154 // vector lane. Get the scalarization cost and scale this amount by the 7155 // probability of executing the predicated block. If the instruction is not 7156 // predicated, we fall through to the next case. 7157 if (VF.isVector() && isScalarWithPredication(I)) { 7158 InstructionCost Cost = 0; 7159 7160 // These instructions have a non-void type, so account for the phi nodes 7161 // that we will create. This cost is likely to be zero. The phi node 7162 // cost, if any, should be scaled by the block probability because it 7163 // models a copy at the end of each predicated block. 7164 Cost += VF.getKnownMinValue() * 7165 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7166 7167 // The cost of the non-predicated instruction. 7168 Cost += VF.getKnownMinValue() * 7169 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7170 7171 // The cost of insertelement and extractelement instructions needed for 7172 // scalarization. 
7173 Cost += getScalarizationOverhead(I, VF); 7174 7175 // Scale the cost by the probability of executing the predicated blocks. 7176 // This assumes the predicated block for each vector lane is equally 7177 // likely. 7178 return Cost / getReciprocalPredBlockProb(); 7179 } 7180 LLVM_FALLTHROUGH; 7181 case Instruction::Add: 7182 case Instruction::FAdd: 7183 case Instruction::Sub: 7184 case Instruction::FSub: 7185 case Instruction::Mul: 7186 case Instruction::FMul: 7187 case Instruction::FDiv: 7188 case Instruction::FRem: 7189 case Instruction::Shl: 7190 case Instruction::LShr: 7191 case Instruction::AShr: 7192 case Instruction::And: 7193 case Instruction::Or: 7194 case Instruction::Xor: { 7195 // Since we will replace the stride by 1 the multiplication should go away. 7196 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7197 return 0; 7198 7199 // Detect reduction patterns 7200 InstructionCost RedCost; 7201 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7202 .isValid()) 7203 return RedCost; 7204 7205 // Certain instructions can be cheaper to vectorize if they have a constant 7206 // second vector operand. One example of this are shifts on x86. 7207 Value *Op2 = I->getOperand(1); 7208 TargetTransformInfo::OperandValueProperties Op2VP; 7209 TargetTransformInfo::OperandValueKind Op2VK = 7210 TTI.getOperandInfo(Op2, Op2VP); 7211 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7212 Op2VK = TargetTransformInfo::OK_UniformValue; 7213 7214 SmallVector<const Value *, 4> Operands(I->operand_values()); 7215 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7216 return N * TTI.getArithmeticInstrCost( 7217 I->getOpcode(), VectorTy, CostKind, 7218 TargetTransformInfo::OK_AnyValue, 7219 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7220 } 7221 case Instruction::FNeg: { 7222 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7223 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF.getKnownMinValue() : 1; 7224 return N * TTI.getArithmeticInstrCost( 7225 I->getOpcode(), VectorTy, CostKind, 7226 TargetTransformInfo::OK_AnyValue, 7227 TargetTransformInfo::OK_AnyValue, 7228 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 7229 I->getOperand(0), I); 7230 } 7231 case Instruction::Select: { 7232 SelectInst *SI = cast<SelectInst>(I); 7233 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7234 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7235 Type *CondTy = SI->getCondition()->getType(); 7236 if (!ScalarCond) 7237 CondTy = VectorType::get(CondTy, VF); 7238 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7239 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7240 } 7241 case Instruction::ICmp: 7242 case Instruction::FCmp: { 7243 Type *ValTy = I->getOperand(0)->getType(); 7244 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7245 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7246 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7247 VectorTy = ToVectorTy(ValTy, VF); 7248 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7249 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7250 } 7251 case Instruction::Store: 7252 case Instruction::Load: { 7253 ElementCount Width = VF; 7254 if (Width.isVector()) { 7255 InstWidening Decision = getWideningDecision(I, Width); 7256 assert(Decision != CM_Unknown && 7257 "CM decision should be taken at this point"); 7258 if (Decision == CM_Scalarize) 7259 Width = ElementCount::getFixed(1); 7260 } 7261 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7262 return getMemoryInstructionCost(I, VF); 7263 } 7264 case Instruction::ZExt: 7265 case Instruction::SExt: 7266 case Instruction::FPToUI: 7267 case Instruction::FPToSI: 7268 case Instruction::FPExt: 7269 case Instruction::PtrToInt: 7270 case Instruction::IntToPtr: 7271 case Instruction::SIToFP: 7272 case Instruction::UIToFP: 7273 case Instruction::Trunc: 7274 case Instruction::FPTrunc: 7275 case Instruction::BitCast: { 7276 // Computes the CastContextHint from a Load/Store instruction. 7277 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7278 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7279 "Expected a load or a store!"); 7280 7281 if (VF.isScalar() || !TheLoop->contains(I)) 7282 return TTI::CastContextHint::Normal; 7283 7284 switch (getWideningDecision(I, VF)) { 7285 case LoopVectorizationCostModel::CM_GatherScatter: 7286 return TTI::CastContextHint::GatherScatter; 7287 case LoopVectorizationCostModel::CM_Interleave: 7288 return TTI::CastContextHint::Interleave; 7289 case LoopVectorizationCostModel::CM_Scalarize: 7290 case LoopVectorizationCostModel::CM_Widen: 7291 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7292 : TTI::CastContextHint::Normal; 7293 case LoopVectorizationCostModel::CM_Widen_Reverse: 7294 return TTI::CastContextHint::Reversed; 7295 case LoopVectorizationCostModel::CM_Unknown: 7296 llvm_unreachable("Instr did not go through cost modelling?"); 7297 } 7298 7299 llvm_unreachable("Unhandled case!"); 7300 }; 7301 7302 unsigned Opcode = I->getOpcode(); 7303 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7304 // For Trunc, the context is the only user, which must be a StoreInst. 
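    // E.g. for "%t = trunc i32 %v to i16" whose only user is a store, the hint
    // comes from that store's widening decision; for "%e = zext i16 %l to i32"
    // with %l a load, it comes from the load's decision.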
7305 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7306 if (I->hasOneUse()) 7307 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7308 CCH = ComputeCCH(Store); 7309 } 7310 // For Z/Sext, the context is the operand, which must be a LoadInst. 7311 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7312 Opcode == Instruction::FPExt) { 7313 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7314 CCH = ComputeCCH(Load); 7315 } 7316 7317 // We optimize the truncation of induction variables having constant 7318 // integer steps. The cost of these truncations is the same as the scalar 7319 // operation. 7320 if (isOptimizableIVTruncate(I, VF)) { 7321 auto *Trunc = cast<TruncInst>(I); 7322 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7323 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7324 } 7325 7326 // Detect reduction patterns 7327 InstructionCost RedCost; 7328 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7329 .isValid()) 7330 return RedCost; 7331 7332 Type *SrcScalarTy = I->getOperand(0)->getType(); 7333 Type *SrcVecTy = 7334 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7335 if (canTruncateToMinimalBitwidth(I, VF)) { 7336 // This cast is going to be shrunk. This may remove the cast or it might 7337 // turn it into slightly different cast. For example, if MinBW == 16, 7338 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7339 // 7340 // Calculate the modified src and dest types. 7341 Type *MinVecTy = VectorTy; 7342 if (Opcode == Instruction::Trunc) { 7343 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7344 VectorTy = 7345 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7346 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7347 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7348 VectorTy = 7349 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7350 } 7351 } 7352 7353 unsigned N; 7354 if (isScalarAfterVectorization(I, VF)) { 7355 assert(!VF.isScalable() && "VF is assumed to be non scalable"); 7356 N = VF.getKnownMinValue(); 7357 } else 7358 N = 1; 7359 return N * 7360 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7361 } 7362 case Instruction::Call: { 7363 bool NeedToScalarize; 7364 CallInst *CI = cast<CallInst>(I); 7365 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7366 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7367 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7368 return std::min(CallCost, IntrinsicCost); 7369 } 7370 return CallCost; 7371 } 7372 case Instruction::ExtractValue: 7373 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7374 default: 7375 // The cost of executing VF copies of the scalar instruction. This opcode 7376 // is unknown. Assume that it is the same as 'mul'. 7377 return VF.getKnownMinValue() * TTI.getArithmeticInstrCost( 7378 Instruction::Mul, VectorTy, CostKind) + 7379 getScalarizationOverhead(I, VF); 7380 } // end of switch. 
7381 } 7382 7383 char LoopVectorize::ID = 0; 7384 7385 static const char lv_name[] = "Loop Vectorization"; 7386 7387 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7388 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7389 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7390 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7391 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7392 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7393 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7394 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7395 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7396 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7397 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7398 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7399 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7400 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7401 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7402 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7403 7404 namespace llvm { 7405 7406 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7407 7408 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7409 bool VectorizeOnlyWhenForced) { 7410 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7411 } 7412 7413 } // end namespace llvm 7414 7415 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7416 // Check if the pointer operand of a load or store instruction is 7417 // consecutive. 7418 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7419 return Legal->isConsecutivePtr(Ptr); 7420 return false; 7421 } 7422 7423 void LoopVectorizationCostModel::collectValuesToIgnore() { 7424 // Ignore ephemeral values. 7425 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7426 7427 // Ignore type-promoting instructions we identified during reduction 7428 // detection. 7429 for (auto &Reduction : Legal->getReductionVars()) { 7430 RecurrenceDescriptor &RedDes = Reduction.second; 7431 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7432 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7433 } 7434 // Ignore type-casting instructions we identified during induction 7435 // detection. 7436 for (auto &Induction : Legal->getInductionVars()) { 7437 InductionDescriptor &IndDes = Induction.second; 7438 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7439 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7440 } 7441 } 7442 7443 void LoopVectorizationCostModel::collectInLoopReductions() { 7444 for (auto &Reduction : Legal->getReductionVars()) { 7445 PHINode *Phi = Reduction.first; 7446 RecurrenceDescriptor &RdxDesc = Reduction.second; 7447 7448 // We don't collect reductions that are type promoted (yet). 7449 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7450 continue; 7451 7452 // If the target would prefer this reduction to happen "in-loop", then we 7453 // want to record it as such. 7454 unsigned Opcode = RdxDesc.getOpcode(); 7455 if (!PreferInLoopReductions && 7456 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7457 TargetTransformInfo::ReductionFlags())) 7458 continue; 7459 7460 // Check that we can correctly put the reductions into the loop, by 7461 // finding the chain of operations that leads from the phi to the loop 7462 // exit value. 
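    // E.g. for a plain integer sum "%red.next = add %red, %x" the chain is just
    // the single add; an empty chain means the reduction cannot be performed
    // in-loop and it remains an ordinary out-of-loop reduction.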
7463 SmallVector<Instruction *, 4> ReductionOperations = 7464 RdxDesc.getReductionOpChain(Phi, TheLoop); 7465 bool InLoop = !ReductionOperations.empty(); 7466 if (InLoop) { 7467 InLoopReductionChains[Phi] = ReductionOperations; 7468 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7469 Instruction *LastChain = Phi; 7470 for (auto *I : ReductionOperations) { 7471 InLoopReductionImmediateChains[I] = LastChain; 7472 LastChain = I; 7473 } 7474 } 7475 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7476 << " reduction for phi: " << *Phi << "\n"); 7477 } 7478 } 7479 7480 // TODO: we could return a pair of values that specify the max VF and 7481 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7482 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7483 // doesn't have a cost model that can choose which plan to execute if 7484 // more than one is generated. 7485 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7486 LoopVectorizationCostModel &CM) { 7487 unsigned WidestType; 7488 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7489 return WidestVectorRegBits / WidestType; 7490 } 7491 7492 VectorizationFactor 7493 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7494 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7495 ElementCount VF = UserVF; 7496 // Outer loop handling: They may require CFG and instruction level 7497 // transformations before even evaluating whether vectorization is profitable. 7498 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7499 // the vectorization pipeline. 7500 if (!OrigLoop->isInnermost()) { 7501 // If the user doesn't provide a vectorization factor, determine a 7502 // reasonable one. 7503 if (UserVF.isZero()) { 7504 VF = ElementCount::getFixed( 7505 determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM)); 7506 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7507 7508 // Make sure we have a VF > 1 for stress testing. 7509 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7510 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7511 << "overriding computed VF.\n"); 7512 VF = ElementCount::getFixed(4); 7513 } 7514 } 7515 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7516 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7517 "VF needs to be a power of two"); 7518 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7519 << "VF " << VF << " to build VPlans.\n"); 7520 buildVPlans(VF, VF); 7521 7522 // For VPlan build stress testing, we bail out after VPlan construction. 7523 if (VPlanBuildStressTest) 7524 return VectorizationFactor::Disabled(); 7525 7526 return {VF, 0 /*Cost*/}; 7527 } 7528 7529 LLVM_DEBUG( 7530 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7531 "VPlan-native path.\n"); 7532 return VectorizationFactor::Disabled(); 7533 } 7534 7535 Optional<VectorizationFactor> 7536 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7537 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7538 Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC); 7539 if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved. 7540 return None; 7541 7542 // Invalidate interleave groups if all blocks of loop will be predicated. 
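  // A predicated header means the tail will be folded by masking; without
  // target support for masked interleaved accesses the groups, and every
  // decision derived from them, have to be dropped.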
7543 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 7544 !useMaskedInterleavedAccesses(*TTI)) { 7545 LLVM_DEBUG( 7546 dbgs() 7547 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7548 "which requires masked-interleaved support.\n"); 7549 if (CM.InterleaveInfo.invalidateGroups()) 7550 // Invalidating interleave groups also requires invalidating all decisions 7551 // based on them, which includes widening decisions and uniform and scalar 7552 // values. 7553 CM.invalidateCostModelingDecisions(); 7554 } 7555 7556 ElementCount MaxVF = MaybeMaxVF.getValue(); 7557 assert(MaxVF.isNonZero() && "MaxVF is zero."); 7558 7559 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF); 7560 if (!UserVF.isZero() && 7561 (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) { 7562 // FIXME: MaxVF is temporarily used inplace of UserVF for illegal scalable 7563 // VFs here, this should be reverted to only use legal UserVFs once the 7564 // loop below supports scalable VFs. 7565 ElementCount VF = UserVFIsLegal ? UserVF : MaxVF; 7566 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max") 7567 << " VF " << VF << ".\n"); 7568 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7569 "VF needs to be a power of two"); 7570 // Collect the instructions (and their associated costs) that will be more 7571 // profitable to scalarize. 7572 CM.selectUserVectorizationFactor(VF); 7573 CM.collectInLoopReductions(); 7574 buildVPlansWithVPRecipes(VF, VF); 7575 LLVM_DEBUG(printPlans(dbgs())); 7576 return {{VF, 0}}; 7577 } 7578 7579 assert(!MaxVF.isScalable() && 7580 "Scalable vectors not yet supported beyond this point"); 7581 7582 for (ElementCount VF = ElementCount::getFixed(1); 7583 ElementCount::isKnownLE(VF, MaxVF); VF *= 2) { 7584 // Collect Uniform and Scalar instructions after vectorization with VF. 7585 CM.collectUniformsAndScalars(VF); 7586 7587 // Collect the instructions (and their associated costs) that will be more 7588 // profitable to scalarize. 7589 if (VF.isVector()) 7590 CM.collectInstsToScalarize(VF); 7591 } 7592 7593 CM.collectInLoopReductions(); 7594 7595 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF); 7596 LLVM_DEBUG(printPlans(dbgs())); 7597 if (MaxVF.isScalar()) 7598 return VectorizationFactor::Disabled(); 7599 7600 // Select the optimal vectorization factor. 7601 return CM.selectVectorizationFactor(MaxVF); 7602 } 7603 7604 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 7605 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 7606 << '\n'); 7607 BestVF = VF; 7608 BestUF = UF; 7609 7610 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 7611 return !Plan->hasVF(VF); 7612 }); 7613 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 7614 } 7615 7616 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 7617 DominatorTree *DT) { 7618 // Perform the actual loop transformation. 7619 7620 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 
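  // The skeleton supplies the surrounding CFG (run-time checks, pre-headers and
  // the remaining scalar loop); State bundles VF, UF and the IR insertion
  // context used while the VPlan recipes are executed in step 2.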
7621 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 7622 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7623 7624 VPTransformState State{ 7625 *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; 7626 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7627 State.TripCount = ILV.getOrCreateTripCount(nullptr); 7628 State.CanonicalIV = ILV.Induction; 7629 7630 ILV.printDebugTracesAtStart(); 7631 7632 //===------------------------------------------------===// 7633 // 7634 // Notice: any optimization or new instruction that go 7635 // into the code below should also be implemented in 7636 // the cost-model. 7637 // 7638 //===------------------------------------------------===// 7639 7640 // 2. Copy and widen instructions from the old loop into the new loop. 7641 VPlans.front()->execute(&State); 7642 7643 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7644 // predication, updating analyses. 7645 ILV.fixVectorizedLoop(State); 7646 7647 ILV.printDebugTracesAtEnd(); 7648 } 7649 7650 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7651 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7652 7653 // We create new control-flow for the vectorized loop, so the original exit 7654 // conditions will be dead after vectorization if it's only used by the 7655 // terminator 7656 SmallVector<BasicBlock*> ExitingBlocks; 7657 OrigLoop->getExitingBlocks(ExitingBlocks); 7658 for (auto *BB : ExitingBlocks) { 7659 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7660 if (!Cmp || !Cmp->hasOneUse()) 7661 continue; 7662 7663 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7664 if (!DeadInstructions.insert(Cmp).second) 7665 continue; 7666 7667 // The operands of the icmp is often a dead trunc, used by IndUpdate. 7668 // TODO: can recurse through operands in general 7669 for (Value *Op : Cmp->operands()) { 7670 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7671 DeadInstructions.insert(cast<Instruction>(Op)); 7672 } 7673 } 7674 7675 // We create new "steps" for induction variable updates to which the original 7676 // induction variables map. An original update instruction will be dead if 7677 // all its users except the induction variable are dead. 7678 auto *Latch = OrigLoop->getLoopLatch(); 7679 for (auto &Induction : Legal->getInductionVars()) { 7680 PHINode *Ind = Induction.first; 7681 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7682 7683 // If the tail is to be folded by masking, the primary induction variable, 7684 // if exists, isn't dead: it will be used for masking. Don't kill it. 7685 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7686 continue; 7687 7688 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7689 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7690 })) 7691 DeadInstructions.insert(IndUpdate); 7692 7693 // We record as "Dead" also the type-casting instructions we had identified 7694 // during induction analysis. We don't need any handling for them in the 7695 // vectorized loop because we have proven that, under a proper runtime 7696 // test guarding the vectorized loop, the value of the phi, and the casted 7697 // value of the phi, are the same. The last instruction in this casting chain 7698 // will get its scalar/vector/widened def from the scalar/vector/widened def 7699 // of the respective phi node. 
Any other casts in the induction def-use chain 7700 // have no other uses outside the phi update chain, and will be ignored. 7701 InductionDescriptor &IndDes = Induction.second; 7702 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7703 DeadInstructions.insert(Casts.begin(), Casts.end()); 7704 } 7705 } 7706 7707 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7708 7709 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7710 7711 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7712 Instruction::BinaryOps BinOp) { 7713 // When unrolling and the VF is 1, we only need to add a simple scalar. 7714 Type *Ty = Val->getType(); 7715 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7716 7717 if (Ty->isFloatingPointTy()) { 7718 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7719 7720 // Floating point operations had to be 'fast' to enable the unrolling. 7721 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7722 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7723 } 7724 Constant *C = ConstantInt::get(Ty, StartIdx); 7725 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7726 } 7727 7728 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7729 SmallVector<Metadata *, 4> MDs; 7730 // Reserve first location for self reference to the LoopID metadata node. 7731 MDs.push_back(nullptr); 7732 bool IsUnrollMetadata = false; 7733 MDNode *LoopID = L->getLoopID(); 7734 if (LoopID) { 7735 // First find existing loop unrolling disable metadata. 7736 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7737 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7738 if (MD) { 7739 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7740 IsUnrollMetadata = 7741 S && S->getString().startswith("llvm.loop.unroll.disable"); 7742 } 7743 MDs.push_back(LoopID->getOperand(i)); 7744 } 7745 } 7746 7747 if (!IsUnrollMetadata) { 7748 // Add runtime unroll disable metadata. 7749 LLVMContext &Context = L->getHeader()->getContext(); 7750 SmallVector<Metadata *, 1> DisableOperands; 7751 DisableOperands.push_back( 7752 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7753 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7754 MDs.push_back(DisableNode); 7755 MDNode *NewLoopID = MDNode::get(Context, MDs); 7756 // Set operand 0 to refer to the loop id itself. 7757 NewLoopID->replaceOperandWith(0, NewLoopID); 7758 L->setLoopID(NewLoopID); 7759 } 7760 } 7761 7762 //===--------------------------------------------------------------------===// 7763 // EpilogueVectorizerMainLoop 7764 //===--------------------------------------------------------------------===// 7765 7766 /// This function is partially responsible for generating the control flow 7767 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7768 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7769 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7770 Loop *Lp = createVectorLoopSkeleton(""); 7771 7772 // Generate the code to check the minimum iteration count of the vector 7773 // epilogue (see below). 7774 EPI.EpilogueIterationCountCheck = 7775 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 7776 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7777 7778 // Generate the code to check any assumptions that we've made for SCEV 7779 // expressions. 
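  // When a check is emitted, emitSCEVChecks() installs a fresh vector
  // pre-header, so comparing LoopVectorPreHeader against its saved value below
  // tells us whether a check block exists and must be recorded in EPI.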
7780 BasicBlock *SavedPreHeader = LoopVectorPreHeader; 7781 emitSCEVChecks(Lp, LoopScalarPreHeader); 7782 7783 // If a safety check was generated save it. 7784 if (SavedPreHeader != LoopVectorPreHeader) 7785 EPI.SCEVSafetyCheck = SavedPreHeader; 7786 7787 // Generate the code that checks at runtime if arrays overlap. We put the 7788 // checks into a separate block to make the more common case of few elements 7789 // faster. 7790 SavedPreHeader = LoopVectorPreHeader; 7791 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 7792 7793 // If a safety check was generated save/overwite it. 7794 if (SavedPreHeader != LoopVectorPreHeader) 7795 EPI.MemSafetyCheck = SavedPreHeader; 7796 7797 // Generate the iteration count check for the main loop, *after* the check 7798 // for the epilogue loop, so that the path-length is shorter for the case 7799 // that goes directly through the vector epilogue. The longer-path length for 7800 // the main loop is compensated for, by the gain from vectorizing the larger 7801 // trip count. Note: the branch will get updated later on when we vectorize 7802 // the epilogue. 7803 EPI.MainLoopIterationCountCheck = 7804 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 7805 7806 // Generate the induction variable. 7807 OldInduction = Legal->getPrimaryInduction(); 7808 Type *IdxTy = Legal->getWidestInductionType(); 7809 Value *StartIdx = ConstantInt::get(IdxTy, 0); 7810 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 7811 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 7812 EPI.VectorTripCount = CountRoundDown; 7813 Induction = 7814 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 7815 getDebugLocFromInstOrOperands(OldInduction)); 7816 7817 // Skip induction resume value creation here because they will be created in 7818 // the second pass. If we created them here, they wouldn't be used anyway, 7819 // because the vplan in the second pass still contains the inductions from the 7820 // original loop. 7821 7822 return completeLoopSkeleton(Lp, OrigLoopID); 7823 } 7824 7825 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7826 LLVM_DEBUG({ 7827 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7828 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 7829 << ", Main Loop UF:" << EPI.MainLoopUF 7830 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 7831 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7832 }); 7833 } 7834 7835 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7836 DEBUG_WITH_TYPE(VerboseDebug, { 7837 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 7838 }); 7839 } 7840 7841 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 7842 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 7843 assert(L && "Expected valid Loop."); 7844 assert(Bypass && "Expected valid bypass basic block."); 7845 unsigned VFactor = 7846 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 7847 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7848 Value *Count = getOrCreateTripCount(L); 7849 // Reuse existing vector loop preheader for TC checks. 7850 // Note that new preheader block is generated for vector loop. 7851 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7852 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7853 7854 // Generate code to check if the loop's trip count is less than VF * UF of the 7855 // main vector loop. 7856 auto P = 7857 Cost->requiresScalarEpilogue() ? 
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7858 7859 Value *CheckMinIters = Builder.CreateICmp( 7860 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 7861 "min.iters.check"); 7862 7863 if (!ForEpilogue) 7864 TCCheckBlock->setName("vector.main.loop.iter.check"); 7865 7866 // Create new preheader for vector loop. 7867 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7868 DT, LI, nullptr, "vector.ph"); 7869 7870 if (ForEpilogue) { 7871 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7872 DT->getNode(Bypass)->getIDom()) && 7873 "TC check is expected to dominate Bypass"); 7874 7875 // Update dominator for Bypass & LoopExit. 7876 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7877 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7878 7879 LoopBypassBlocks.push_back(TCCheckBlock); 7880 7881 // Save the trip count so we don't have to regenerate it in the 7882 // vec.epilog.iter.check. This is safe to do because the trip count 7883 // generated here dominates the vector epilog iter check. 7884 EPI.TripCount = Count; 7885 } 7886 7887 ReplaceInstWithInst( 7888 TCCheckBlock->getTerminator(), 7889 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7890 7891 return TCCheckBlock; 7892 } 7893 7894 //===--------------------------------------------------------------------===// 7895 // EpilogueVectorizerEpilogueLoop 7896 //===--------------------------------------------------------------------===// 7897 7898 /// This function is partially responsible for generating the control flow 7899 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7900 BasicBlock * 7901 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7902 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7903 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 7904 7905 // Now, compare the remaining count and if there aren't enough iterations to 7906 // execute the vectorized epilogue skip to the scalar part. 7907 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7908 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7909 LoopVectorPreHeader = 7910 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7911 LI, nullptr, "vec.epilog.ph"); 7912 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 7913 VecEpilogueIterationCountCheck); 7914 7915 // Adjust the control flow taking the state info from the main loop 7916 // vectorization into account. 
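  // The check blocks saved from the first pass still branch to the block just
  // renamed vec.epilog.iter.check; below, the main-loop iteration-count check is
  // retargeted to the new epilogue pre-header, while the epilogue count check
  // and the SCEV/memory checks bypass straight to the scalar pre-header.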
7917 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7918 "expected this to be saved from the previous pass."); 7919 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7920 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7921 7922 DT->changeImmediateDominator(LoopVectorPreHeader, 7923 EPI.MainLoopIterationCountCheck); 7924 7925 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7926 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7927 7928 if (EPI.SCEVSafetyCheck) 7929 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7930 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7931 if (EPI.MemSafetyCheck) 7932 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7933 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7934 7935 DT->changeImmediateDominator( 7936 VecEpilogueIterationCountCheck, 7937 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7938 7939 DT->changeImmediateDominator(LoopScalarPreHeader, 7940 EPI.EpilogueIterationCountCheck); 7941 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 7942 7943 // Keep track of bypass blocks, as they feed start values to the induction 7944 // phis in the scalar loop preheader. 7945 if (EPI.SCEVSafetyCheck) 7946 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7947 if (EPI.MemSafetyCheck) 7948 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7949 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7950 7951 // Generate a resume induction for the vector epilogue and put it in the 7952 // vector epilogue preheader 7953 Type *IdxTy = Legal->getWidestInductionType(); 7954 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 7955 LoopVectorPreHeader->getFirstNonPHI()); 7956 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 7957 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 7958 EPI.MainLoopIterationCountCheck); 7959 7960 // Generate the induction variable. 7961 OldInduction = Legal->getPrimaryInduction(); 7962 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 7963 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 7964 Value *StartIdx = EPResumeVal; 7965 Induction = 7966 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 7967 getDebugLocFromInstOrOperands(OldInduction)); 7968 7969 // Generate induction resume values. These variables save the new starting 7970 // indexes for the scalar loop. They are used to test if there are any tail 7971 // iterations left once the vector loop has completed. 7972 // Note that when the vectorized epilogue is skipped due to iteration count 7973 // check, then the resume value for the induction variable comes from 7974 // the trip count of the main vector loop, hence passing the AdditionalBypass 7975 // argument. 
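  // The pair passed below names that bypass block together with the value (the
  // main loop's vector trip count) from which the inductions resume along that
  // edge.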
7976 createInductionResumeValues(Lp, CountRoundDown, 7977 {VecEpilogueIterationCountCheck, 7978 EPI.VectorTripCount} /* AdditionalBypass */); 7979 7980 AddRuntimeUnrollDisableMetaData(Lp); 7981 return completeLoopSkeleton(Lp, OrigLoopID); 7982 } 7983 7984 BasicBlock * 7985 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( 7986 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) { 7987 7988 assert(EPI.TripCount && 7989 "Expected trip count to have been safed in the first pass."); 7990 assert( 7991 (!isa<Instruction>(EPI.TripCount) || 7992 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && 7993 "saved trip count does not dominate insertion point."); 7994 Value *TC = EPI.TripCount; 7995 IRBuilder<> Builder(Insert->getTerminator()); 7996 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); 7997 7998 // Generate code to check if the loop's trip count is less than VF * UF of the 7999 // vector epilogue loop. 8000 auto P = 8001 Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8002 8003 Value *CheckMinIters = Builder.CreateICmp( 8004 P, Count, 8005 ConstantInt::get(Count->getType(), 8006 EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF), 8007 "min.epilog.iters.check"); 8008 8009 ReplaceInstWithInst( 8010 Insert->getTerminator(), 8011 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8012 8013 LoopBypassBlocks.push_back(Insert); 8014 return Insert; 8015 } 8016 8017 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8018 LLVM_DEBUG({ 8019 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8020 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8021 << ", Main Loop UF:" << EPI.MainLoopUF 8022 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8023 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8024 }); 8025 } 8026 8027 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8028 DEBUG_WITH_TYPE(VerboseDebug, { 8029 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8030 }); 8031 } 8032 8033 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8034 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8035 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8036 bool PredicateAtRangeStart = Predicate(Range.Start); 8037 8038 for (ElementCount TmpVF = Range.Start * 2; 8039 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8040 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8041 Range.End = TmpVF; 8042 break; 8043 } 8044 8045 return PredicateAtRangeStart; 8046 } 8047 8048 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8049 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8050 /// of VF's starting at a given VF and extending it as much as possible. Each 8051 /// vectorization decision can potentially shorten this sub-range during 8052 /// buildVPlan(). 8053 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8054 ElementCount MaxVF) { 8055 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8056 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8057 VFRange SubRange = {VF, MaxVFPlusOne}; 8058 VPlans.push_back(buildVPlan(SubRange)); 8059 VF = SubRange.End; 8060 } 8061 } 8062 8063 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8064 VPlanPtr &Plan) { 8065 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8066 8067 // Look for cached value. 
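  // Edge masks are memoised per (Src, Dst) pair, so repeated queries (e.g. from
  // several phis blending values from the same predecessor) reuse one set of
  // mask recipes.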
8068 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8069 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8070 if (ECEntryIt != EdgeMaskCache.end()) 8071 return ECEntryIt->second; 8072 8073 VPValue *SrcMask = createBlockInMask(Src, Plan); 8074 8075 // The terminator has to be a branch inst! 8076 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8077 assert(BI && "Unexpected terminator found"); 8078 8079 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8080 return EdgeMaskCache[Edge] = SrcMask; 8081 8082 // If source is an exiting block, we know the exit edge is dynamically dead 8083 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8084 // adding uses of an otherwise potentially dead instruction. 8085 if (OrigLoop->isLoopExiting(Src)) 8086 return EdgeMaskCache[Edge] = SrcMask; 8087 8088 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8089 assert(EdgeMask && "No Edge Mask found for condition"); 8090 8091 if (BI->getSuccessor(0) != Dst) 8092 EdgeMask = Builder.createNot(EdgeMask); 8093 8094 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8095 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8096 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8097 // The select version does not introduce new UB if SrcMask is false and 8098 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8099 VPValue *False = Plan->getOrAddVPValue( 8100 ConstantInt::getFalse(BI->getCondition()->getType())); 8101 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8102 } 8103 8104 return EdgeMaskCache[Edge] = EdgeMask; 8105 } 8106 8107 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8108 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8109 8110 // Look for cached value. 8111 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8112 if (BCEntryIt != BlockMaskCache.end()) 8113 return BCEntryIt->second; 8114 8115 // All-one mask is modelled as no-mask following the convention for masked 8116 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8117 VPValue *BlockMask = nullptr; 8118 8119 if (OrigLoop->getHeader() == BB) { 8120 if (!CM.blockNeedsPredication(BB)) 8121 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8122 8123 // Create the block in mask as the first non-phi instruction in the block. 8124 VPBuilder::InsertPointGuard Guard(Builder); 8125 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8126 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8127 8128 // Introduce the early-exit compare IV <= BTC to form header block mask. 8129 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8130 // Start by constructing the desired canonical IV. 
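    // Worked example: with VF = 4 and a trip count of 10, BTC = 9, so the last
    // vector iteration compares lanes {8,9,10,11} against 9 and yields the mask
    // <1,1,0,0>, disabling the two out-of-range lanes.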
8131 VPValue *IV = nullptr; 8132 if (Legal->getPrimaryInduction()) 8133 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8134 else { 8135 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8136 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8137 IV = IVRecipe->getVPValue(); 8138 } 8139 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8140 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8141 8142 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8143 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8144 // as a second argument, we only pass the IV here and extract the 8145 // tripcount from the transform state where codegen of the VP instructions 8146 // happen. 8147 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8148 } else { 8149 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8150 } 8151 return BlockMaskCache[BB] = BlockMask; 8152 } 8153 8154 // This is the block mask. We OR all incoming edges. 8155 for (auto *Predecessor : predecessors(BB)) { 8156 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8157 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8158 return BlockMaskCache[BB] = EdgeMask; 8159 8160 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8161 BlockMask = EdgeMask; 8162 continue; 8163 } 8164 8165 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8166 } 8167 8168 return BlockMaskCache[BB] = BlockMask; 8169 } 8170 8171 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 8172 VPlanPtr &Plan) { 8173 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8174 "Must be called with either a load or store"); 8175 8176 auto willWiden = [&](ElementCount VF) -> bool { 8177 if (VF.isScalar()) 8178 return false; 8179 LoopVectorizationCostModel::InstWidening Decision = 8180 CM.getWideningDecision(I, VF); 8181 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8182 "CM decision should be taken at this point."); 8183 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8184 return true; 8185 if (CM.isScalarAfterVectorization(I, VF) || 8186 CM.isProfitableToScalarize(I, VF)) 8187 return false; 8188 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8189 }; 8190 8191 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8192 return nullptr; 8193 8194 VPValue *Mask = nullptr; 8195 if (Legal->isMaskRequired(I)) 8196 Mask = createBlockInMask(I->getParent(), Plan); 8197 8198 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 8199 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8200 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 8201 8202 StoreInst *Store = cast<StoreInst>(I); 8203 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 8204 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 8205 } 8206 8207 VPWidenIntOrFpInductionRecipe * 8208 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const { 8209 // Check if this is an integer or fp induction. If so, build the recipe that 8210 // produces its scalar and vector values. 
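  // Other induction kinds (e.g. pointer inductions) fall through and return
  // nullptr here; they are picked up by the later PHI handling instead.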
8211 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8212 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8213 II.getKind() == InductionDescriptor::IK_FpInduction) { 8214 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8215 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8216 return new VPWidenIntOrFpInductionRecipe( 8217 Phi, Start, Casts.empty() ? nullptr : Casts.front()); 8218 } 8219 8220 return nullptr; 8221 } 8222 8223 VPWidenIntOrFpInductionRecipe * 8224 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range, 8225 VPlan &Plan) const { 8226 // Optimize the special case where the source is a constant integer 8227 // induction variable. Notice that we can only optimize the 'trunc' case 8228 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8229 // (c) other casts depend on pointer size. 8230 8231 // Determine whether \p K is a truncation based on an induction variable that 8232 // can be optimized. 8233 auto isOptimizableIVTruncate = 8234 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8235 return [=](ElementCount VF) -> bool { 8236 return CM.isOptimizableIVTruncate(K, VF); 8237 }; 8238 }; 8239 8240 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8241 isOptimizableIVTruncate(I), Range)) { 8242 8243 InductionDescriptor II = 8244 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8245 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8246 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8247 Start, nullptr, I); 8248 } 8249 return nullptr; 8250 } 8251 8252 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 8253 // We know that all PHIs in non-header blocks are converted into selects, so 8254 // we don't have to worry about the insertion order and we can just use the 8255 // builder. At this point we generate the predication tree. There may be 8256 // duplications since this is a simple recursive scan, but future 8257 // optimizations will clean it up. 8258 8259 SmallVector<VPValue *, 2> Operands; 8260 unsigned NumIncoming = Phi->getNumIncomingValues(); 8261 for (unsigned In = 0; In < NumIncoming; In++) { 8262 VPValue *EdgeMask = 8263 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8264 assert((EdgeMask || NumIncoming == 1) && 8265 "Multiple predecessors with one having a full mask"); 8266 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 8267 if (EdgeMask) 8268 Operands.push_back(EdgeMask); 8269 } 8270 return new VPBlendRecipe(Phi, Operands); 8271 } 8272 8273 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 8274 VPlan &Plan) const { 8275 8276 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8277 [this, CI](ElementCount VF) { 8278 return CM.isScalarWithPredication(CI, VF); 8279 }, 8280 Range); 8281 8282 if (IsPredicated) 8283 return nullptr; 8284 8285 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8286 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8287 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8288 ID == Intrinsic::pseudoprobe || 8289 ID == Intrinsic::experimental_noalias_scope_decl)) 8290 return nullptr; 8291 8292 auto willWiden = [&](ElementCount VF) -> bool { 8293 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8294 // The following case may be scalarized depending on the VF. 
8295 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8296 // version of the instruction. 8297 // Is it beneficial to perform intrinsic call compared to lib call? 8298 bool NeedToScalarize = false; 8299 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8300 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8301 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8302 assert(IntrinsicCost.isValid() && CallCost.isValid() && 8303 "Cannot have invalid costs while widening"); 8304 return UseVectorIntrinsic || !NeedToScalarize; 8305 }; 8306 8307 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8308 return nullptr; 8309 8310 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 8311 } 8312 8313 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8314 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8315 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8316 // Instruction should be widened, unless it is scalar after vectorization, 8317 // scalarization is profitable or it is predicated. 8318 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8319 return CM.isScalarAfterVectorization(I, VF) || 8320 CM.isProfitableToScalarize(I, VF) || 8321 CM.isScalarWithPredication(I, VF); 8322 }; 8323 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8324 Range); 8325 } 8326 8327 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 8328 auto IsVectorizableOpcode = [](unsigned Opcode) { 8329 switch (Opcode) { 8330 case Instruction::Add: 8331 case Instruction::And: 8332 case Instruction::AShr: 8333 case Instruction::BitCast: 8334 case Instruction::FAdd: 8335 case Instruction::FCmp: 8336 case Instruction::FDiv: 8337 case Instruction::FMul: 8338 case Instruction::FNeg: 8339 case Instruction::FPExt: 8340 case Instruction::FPToSI: 8341 case Instruction::FPToUI: 8342 case Instruction::FPTrunc: 8343 case Instruction::FRem: 8344 case Instruction::FSub: 8345 case Instruction::ICmp: 8346 case Instruction::IntToPtr: 8347 case Instruction::LShr: 8348 case Instruction::Mul: 8349 case Instruction::Or: 8350 case Instruction::PtrToInt: 8351 case Instruction::SDiv: 8352 case Instruction::Select: 8353 case Instruction::SExt: 8354 case Instruction::Shl: 8355 case Instruction::SIToFP: 8356 case Instruction::SRem: 8357 case Instruction::Sub: 8358 case Instruction::Trunc: 8359 case Instruction::UDiv: 8360 case Instruction::UIToFP: 8361 case Instruction::URem: 8362 case Instruction::Xor: 8363 case Instruction::ZExt: 8364 return true; 8365 } 8366 return false; 8367 }; 8368 8369 if (!IsVectorizableOpcode(I->getOpcode())) 8370 return nullptr; 8371 8372 // Success: widen this instruction. 
8373 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 8374 } 8375 8376 VPBasicBlock *VPRecipeBuilder::handleReplication( 8377 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8378 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8379 VPlanPtr &Plan) { 8380 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8381 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8382 Range); 8383 8384 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8385 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8386 Range); 8387 8388 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8389 IsUniform, IsPredicated); 8390 setRecipe(I, Recipe); 8391 Plan->addVPValue(I, Recipe); 8392 8393 // Find if I uses a predicated instruction. If so, it will use its scalar 8394 // value. Avoid hoisting the insert-element which packs the scalar value into 8395 // a vector value, as that happens iff all users use the vector value. 8396 for (auto &Op : I->operands()) 8397 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8398 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8399 PredInst2Recipe[PredInst]->setAlsoPack(false); 8400 8401 // Finalize the recipe for Instr, first if it is not predicated. 8402 if (!IsPredicated) { 8403 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8404 VPBB->appendRecipe(Recipe); 8405 return VPBB; 8406 } 8407 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8408 assert(VPBB->getSuccessors().empty() && 8409 "VPBB has successors when handling predicated replication."); 8410 // Record predicated instructions for above packing optimizations. 8411 PredInst2Recipe[I] = Recipe; 8412 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8413 VPBlockUtils::insertBlockAfter(Region, VPBB); 8414 auto *RegSucc = new VPBasicBlock(); 8415 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8416 return RegSucc; 8417 } 8418 8419 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8420 VPRecipeBase *PredRecipe, 8421 VPlanPtr &Plan) { 8422 // Instructions marked for predication are replicated and placed under an 8423 // if-then construct to prevent side-effects. 8424 8425 // Generate recipes to compute the block mask for this region. 8426 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8427 8428 // Build the triangular if-then region. 8429 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8430 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8431 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8432 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8433 auto *PHIRecipe = Instr->getType()->isVoidTy() 8434 ? nullptr 8435 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8436 if (PHIRecipe) { 8437 Plan->removeVPValueFor(Instr); 8438 Plan->addVPValue(Instr, PHIRecipe); 8439 } 8440 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8441 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8442 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8443 8444 // Note: first set Entry as region entry and then connect successors starting 8445 // from it in order, to propagate the "parent" of each VPBasicBlock. 
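// The resulting single-entry single-exit region has a triangular shape,
// sketched below (BlockInMask guards the replicated instruction):
//
//            [pred.<opcode>.entry]     -- VPBranchOnMaskRecipe
//               |             \
//               |        [pred.<opcode>.if]   -- replicated instruction
//               |             /
//            [pred.<opcode>.continue]  -- VPPredInstPHIRecipe (if non-void)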
8446 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8447 VPBlockUtils::connectBlocks(Pred, Exit);
8448
8449 return Region;
8450 }
8451
8452 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8453 VFRange &Range,
8454 VPlanPtr &Plan) {
8455 // First, check for specific widening recipes that deal with calls, memory
8456 // operations, inductions and Phi nodes.
8457 if (auto *CI = dyn_cast<CallInst>(Instr))
8458 return tryToWidenCall(CI, Range, *Plan);
8459
8460 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8461 return tryToWidenMemory(Instr, Range, Plan);
8462
8463 VPRecipeBase *Recipe;
8464 if (auto Phi = dyn_cast<PHINode>(Instr)) {
8465 if (Phi->getParent() != OrigLoop->getHeader())
8466 return tryToBlend(Phi, Plan);
8467 if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8468 return Recipe;
8469
8470 if (Legal->isReductionVariable(Phi)) {
8471 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8472 VPValue *StartV =
8473 Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8474 return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8475 }
8476
8477 return new VPWidenPHIRecipe(Phi);
8478 }
8479
8480 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8481 cast<TruncInst>(Instr), Range, *Plan)))
8482 return Recipe;
8483
8484 if (!shouldWiden(Instr, Range))
8485 return nullptr;
8486
8487 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8488 return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8489 OrigLoop);
8490
8491 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8492 bool InvariantCond =
8493 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8494 return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8495 InvariantCond);
8496 }
8497
8498 return tryToWiden(Instr, *Plan);
8499 }
8500
8501 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8502 ElementCount MaxVF) {
8503 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8504
8505 // Collect instructions from the original loop that will become trivially dead
8506 // in the vectorized loop. We don't need to vectorize these instructions. For
8507 // example, original induction update instructions can become dead because we
8508 // separately emit induction "steps" when generating code for the new loop.
8509 // Similarly, we create a new latch condition when setting up the structure
8510 // of the new loop, so the old one can become dead.
8511 SmallPtrSet<Instruction *, 4> DeadInstructions;
8512 collectTriviallyDeadInstructions(DeadInstructions);
8513
8514 // Add assume instructions we need to drop to DeadInstructions, to prevent
8515 // them from being added to the VPlan.
8516 // TODO: We only need to drop assumes in blocks that get flattened. If the
8517 // control flow is preserved, we should keep them.
8518 auto &ConditionalAssumes = Legal->getConditionalAssumes();
8519 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8520
8521 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8522 // Dead instructions do not need sinking. Remove them from SinkAfter.
8523 for (Instruction *I : DeadInstructions)
8524 SinkAfter.erase(I);
8525
8526 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8527 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8528 VFRange SubRange = {VF, MaxVFPlusOne};
8529 VPlans.push_back(
8530 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8531 VF = SubRange.End;
8532 }
8533 }
8534
8535 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8536 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8537 const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8538
8539 // Hold a mapping from predicated instructions to their recipes, in order to
8540 // fix their AlsoPack behavior if a user is determined to replicate and use a
8541 // scalar instead of vector value.
8542 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8543
8544 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8545
8546 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8547
8548 // ---------------------------------------------------------------------------
8549 // Pre-construction: record ingredients whose recipes we'll need to further
8550 // process after constructing the initial VPlan.
8551 // ---------------------------------------------------------------------------
8552
8553 // Mark instructions we'll need to sink later and their targets as
8554 // ingredients whose recipe we'll need to record.
8555 for (auto &Entry : SinkAfter) {
8556 RecipeBuilder.recordRecipeOf(Entry.first);
8557 RecipeBuilder.recordRecipeOf(Entry.second);
8558 }
8559 for (auto &Reduction : CM.getInLoopReductionChains()) {
8560 PHINode *Phi = Reduction.first;
8561 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8562 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8563
8564 RecipeBuilder.recordRecipeOf(Phi);
8565 for (auto &R : ReductionOperations) {
8566 RecipeBuilder.recordRecipeOf(R);
8567 // For min/max reductions, where we have a pair of icmp/select, we also
8568 // need to record the ICmp recipe, so it can be removed later.
8569 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8570 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8571 }
8572 }
8573
8574 // For each interleave group which is relevant for this (possibly trimmed)
8575 // Range, add it to the set of groups to be later applied to the VPlan and add
8576 // placeholders for its members' Recipes which we'll be replacing with a
8577 // single VPInterleaveRecipe.
8578 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8579 auto applyIG = [IG, this](ElementCount VF) -> bool {
8580 return (VF.isVector() && // Query is illegal for VF == 1
8581 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8582 LoopVectorizationCostModel::CM_Interleave);
8583 };
8584 if (!getDecisionAndClampRange(applyIG, Range))
8585 continue;
8586 InterleaveGroups.insert(IG);
8587 for (unsigned i = 0; i < IG->getFactor(); i++)
8588 if (Instruction *Member = IG->getMember(i))
8589 RecipeBuilder.recordRecipeOf(Member);
8590 }
8591
8592 // ---------------------------------------------------------------------------
8593 // Build initial VPlan: Scan the body of the loop in a topological order to
8594 // visit each basic block after having visited its predecessor basic blocks.
8595 // ---------------------------------------------------------------------------
8596
8597 // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8598 auto Plan = std::make_unique<VPlan>();
8599 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8600 Plan->setEntry(VPBB);
8601
8602 // Scan the body of the loop in a topological order to visit each basic block
8603 // after having visited its predecessor basic blocks.
8604 LoopBlocksDFS DFS(OrigLoop);
8605 DFS.perform(LI);
8606
8607 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8608 // Relevant instructions from basic block BB will be grouped into VPRecipe
8609 // ingredients and fill a new VPBasicBlock.
8610 unsigned VPBBsForBB = 0;
8611 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8612 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8613 VPBB = FirstVPBBForBB;
8614 Builder.setInsertPoint(VPBB);
8615
8616 // Introduce each ingredient into VPlan.
8617 // TODO: Model and preserve debug intrinsics in VPlan.
8618 for (Instruction &I : BB->instructionsWithoutDebug()) {
8619 Instruction *Instr = &I;
8620
8621 // First filter out irrelevant instructions, to ensure no recipes are
8622 // built for them.
8623 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8624 continue;
8625
8626 if (auto Recipe =
8627 RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8628
8629 // VPBlendRecipes with a single incoming (value, mask) pair are no-ops.
8630 // Use the incoming value directly.
8631 if (isa<VPBlendRecipe>(Recipe) && Recipe->getNumOperands() <= 2) {
8632 Plan->removeVPValueFor(Instr);
8633 Plan->addVPValue(Instr, Recipe->getOperand(0));
8634 delete Recipe;
8635 continue;
8636 }
8637 for (auto *Def : Recipe->definedValues()) {
8638 auto *UV = Def->getUnderlyingValue();
8639 Plan->addVPValue(UV, Def);
8640 }
8641
8642 RecipeBuilder.setRecipe(Instr, Recipe);
8643 VPBB->appendRecipe(Recipe);
8644 continue;
8645 }
8646
8647 // Otherwise, if all widening options failed, the instruction is to be
8648 // replicated. This may create a successor for VPBB.
8649 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8650 Instr, Range, VPBB, PredInst2Recipe, Plan);
8651 if (NextVPBB != VPBB) {
8652 VPBB = NextVPBB;
8653 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8654 : "");
8655 }
8656 }
8657 }
8658
8659 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8660 // may also be empty, such as the last one, VPBB, reflecting original
8661 // basic blocks with no recipes.
8662 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8663 assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8664 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8665 VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8666 delete PreEntry;
8667
8668 // ---------------------------------------------------------------------------
8669 // Transform initial VPlan: Apply previously taken decisions, in order, to
8670 // bring the VPlan to its final state.
8671 // ---------------------------------------------------------------------------
8672
8673 // Apply Sink-After legal constraints.
8674 for (auto &Entry : SinkAfter) {
8675 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8676 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8677 // If the target is in a replication region, make sure to move Sink to the
8678 // block after it, not into the replication region itself.
8679 if (auto *Region = 8680 dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) { 8681 if (Region->isReplicator()) { 8682 assert(Region->getNumSuccessors() == 1 && "Expected SESE region!"); 8683 VPBasicBlock *NextBlock = 8684 cast<VPBasicBlock>(Region->getSuccessors().front()); 8685 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8686 continue; 8687 } 8688 } 8689 Sink->moveAfter(Target); 8690 } 8691 8692 // Interleave memory: for each Interleave Group we marked earlier as relevant 8693 // for this VPlan, replace the Recipes widening its memory instructions with a 8694 // single VPInterleaveRecipe at its insertion point. 8695 for (auto IG : InterleaveGroups) { 8696 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8697 RecipeBuilder.getRecipe(IG->getInsertPos())); 8698 SmallVector<VPValue *, 4> StoredValues; 8699 for (unsigned i = 0; i < IG->getFactor(); ++i) 8700 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 8701 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 8702 8703 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8704 Recipe->getMask()); 8705 VPIG->insertBefore(Recipe); 8706 unsigned J = 0; 8707 for (unsigned i = 0; i < IG->getFactor(); ++i) 8708 if (Instruction *Member = IG->getMember(i)) { 8709 if (!Member->getType()->isVoidTy()) { 8710 VPValue *OriginalV = Plan->getVPValue(Member); 8711 Plan->removeVPValueFor(Member); 8712 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8713 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8714 J++; 8715 } 8716 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 8717 } 8718 } 8719 8720 // Adjust the recipes for any inloop reductions. 8721 if (Range.Start.isVector()) 8722 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 8723 8724 // Finally, if tail is folded by masking, introduce selects between the phi 8725 // and the live-out instruction of each reduction, at the end of the latch. 8726 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 8727 Builder.setInsertPoint(VPBB); 8728 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 8729 for (auto &Reduction : Legal->getReductionVars()) { 8730 if (CM.isInLoopReduction(Reduction.first)) 8731 continue; 8732 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 8733 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 8734 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 8735 } 8736 } 8737 8738 std::string PlanName; 8739 raw_string_ostream RSO(PlanName); 8740 ElementCount VF = Range.Start; 8741 Plan->addVF(VF); 8742 RSO << "Initial VPlan for VF={" << VF; 8743 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 8744 Plan->addVF(VF); 8745 RSO << "," << VF; 8746 } 8747 RSO << "},UF>=1"; 8748 RSO.flush(); 8749 Plan->setName(PlanName); 8750 8751 return Plan; 8752 } 8753 8754 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8755 // Outer loop handling: They may require CFG and instruction level 8756 // transformations before even evaluating whether vectorization is profitable. 8757 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8758 // the vectorization pipeline. 
8759 assert(!OrigLoop->isInnermost());
8760 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8761
8762 // Create new empty VPlan
8763 auto Plan = std::make_unique<VPlan>();
8764
8765 // Build hierarchical CFG
8766 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8767 HCFGBuilder.buildHierarchicalCFG();
8768
8769 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8770 VF *= 2)
8771 Plan->addVF(VF);
8772
8773 if (EnableVPlanPredication) {
8774 VPlanPredicator VPP(*Plan);
8775 VPP.predicate();
8776
8777 // Avoid running transformation to recipes until masked code generation in
8778 // VPlan-native path is in place.
8779 return Plan;
8780 }
8781
8782 SmallPtrSet<Instruction *, 1> DeadInstructions;
8783 VPlanTransforms::VPInstructionsToVPRecipes(
8784 OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8785 return Plan;
8786 }
8787
8788 // Adjust the recipes for any inloop reductions. The chain of instructions
8789 // leading from the loop exit instr to the phi needs to be converted to
8790 // reductions, with one operand being vector and the other being the scalar
8791 // reduction chain.
8792 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8793 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8794 for (auto &Reduction : CM.getInLoopReductionChains()) {
8795 PHINode *Phi = Reduction.first;
8796 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8797 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8798
8799 // ReductionOperations are ordered top-down from the phi's use to the
8800 // LoopExitValue. We keep track of the previous item (the Chain) to tell
8801 // which of the two operands will remain scalar and which will be reduced.
8802 // For minmax the chain will be the select instructions.
8803 Instruction *Chain = Phi;
8804 for (Instruction *R : ReductionOperations) {
8805 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8806 RecurKind Kind = RdxDesc.getRecurrenceKind();
8807
8808 VPValue *ChainOp = Plan->getVPValue(Chain);
8809 unsigned FirstOpId;
8810 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8811 assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8812 "Expected to replace a VPWidenSelectSC");
8813 FirstOpId = 1;
8814 } else {
8815 assert(isa<VPWidenRecipe>(WidenRecipe) &&
8816 "Expected to replace a VPWidenSC");
8817 FirstOpId = 0;
8818 }
8819 unsigned VecOpId =
8820 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8821 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8822
8823 auto *CondOp = CM.foldTailByMasking()
8824 ?
RecipeBuilder.createBlockInMask(R->getParent(), Plan) 8825 : nullptr; 8826 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 8827 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 8828 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe); 8829 Plan->removeVPValueFor(R); 8830 Plan->addVPValue(R, RedRecipe); 8831 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 8832 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe); 8833 WidenRecipe->eraseFromParent(); 8834 8835 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 8836 VPRecipeBase *CompareRecipe = 8837 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 8838 assert(isa<VPWidenRecipe>(CompareRecipe) && 8839 "Expected to replace a VPWidenSC"); 8840 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 8841 "Expected no remaining users"); 8842 CompareRecipe->eraseFromParent(); 8843 } 8844 Chain = R; 8845 } 8846 } 8847 } 8848 8849 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 8850 VPSlotTracker &SlotTracker) const { 8851 O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 8852 IG->getInsertPos()->printAsOperand(O, false); 8853 O << ", "; 8854 getAddr()->printAsOperand(O, SlotTracker); 8855 VPValue *Mask = getMask(); 8856 if (Mask) { 8857 O << ", "; 8858 Mask->printAsOperand(O, SlotTracker); 8859 } 8860 for (unsigned i = 0; i < IG->getFactor(); ++i) 8861 if (Instruction *I = IG->getMember(i)) 8862 O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i; 8863 } 8864 8865 void VPWidenCallRecipe::execute(VPTransformState &State) { 8866 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 8867 *this, State); 8868 } 8869 8870 void VPWidenSelectRecipe::execute(VPTransformState &State) { 8871 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 8872 this, *this, InvariantCond, State); 8873 } 8874 8875 void VPWidenRecipe::execute(VPTransformState &State) { 8876 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 8877 } 8878 8879 void VPWidenGEPRecipe::execute(VPTransformState &State) { 8880 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 8881 *this, State.UF, State.VF, IsPtrLoopInvariant, 8882 IsIndexLoopInvariant, State); 8883 } 8884 8885 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 8886 assert(!State.Instance && "Int or FP induction being replicated."); 8887 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 8888 getTruncInst(), getVPValue(0), 8889 getCastValue(), State); 8890 } 8891 8892 void VPWidenPHIRecipe::execute(VPTransformState &State) { 8893 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc, 8894 getStartValue(), this, State); 8895 } 8896 8897 void VPBlendRecipe::execute(VPTransformState &State) { 8898 State.ILV->setDebugLocFromInst(State.Builder, Phi); 8899 // We know that all PHIs in non-header blocks are converted into 8900 // selects, so we don't have to worry about the insertion order and we 8901 // can just use the builder. 8902 // At this point we generate the predication tree. There may be 8903 // duplications since this is a simple recursive scan, but future 8904 // optimizations will clean it up. 
8905 8906 unsigned NumIncoming = getNumIncomingValues(); 8907 8908 // Generate a sequence of selects of the form: 8909 // SELECT(Mask3, In3, 8910 // SELECT(Mask2, In2, 8911 // SELECT(Mask1, In1, 8912 // In0))) 8913 // Note that Mask0 is never used: lanes for which no path reaches this phi and 8914 // are essentially undef are taken from In0. 8915 InnerLoopVectorizer::VectorParts Entry(State.UF); 8916 for (unsigned In = 0; In < NumIncoming; ++In) { 8917 for (unsigned Part = 0; Part < State.UF; ++Part) { 8918 // We might have single edge PHIs (blocks) - use an identity 8919 // 'select' for the first PHI operand. 8920 Value *In0 = State.get(getIncomingValue(In), Part); 8921 if (In == 0) 8922 Entry[Part] = In0; // Initialize with the first incoming value. 8923 else { 8924 // Select between the current value and the previous incoming edge 8925 // based on the incoming mask. 8926 Value *Cond = State.get(getMask(In), Part); 8927 Entry[Part] = 8928 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 8929 } 8930 } 8931 } 8932 for (unsigned Part = 0; Part < State.UF; ++Part) 8933 State.set(this, Entry[Part], Part); 8934 } 8935 8936 void VPInterleaveRecipe::execute(VPTransformState &State) { 8937 assert(!State.Instance && "Interleave group being replicated."); 8938 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 8939 getStoredValues(), getMask()); 8940 } 8941 8942 void VPReductionRecipe::execute(VPTransformState &State) { 8943 assert(!State.Instance && "Reduction being replicated."); 8944 for (unsigned Part = 0; Part < State.UF; ++Part) { 8945 RecurKind Kind = RdxDesc->getRecurrenceKind(); 8946 Value *NewVecOp = State.get(getVecOp(), Part); 8947 if (VPValue *Cond = getCondOp()) { 8948 Value *NewCond = State.get(Cond, Part); 8949 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 8950 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 8951 Kind, VecTy->getElementType()); 8952 Constant *IdenVec = 8953 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 8954 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 8955 NewVecOp = Select; 8956 } 8957 Value *NewRed = 8958 createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 8959 Value *PrevInChain = State.get(getChainOp(), Part); 8960 Value *NextInChain; 8961 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 8962 NextInChain = 8963 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 8964 NewRed, PrevInChain); 8965 } else { 8966 NextInChain = State.Builder.CreateBinOp( 8967 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 8968 PrevInChain); 8969 } 8970 State.set(this, NextInChain, Part); 8971 } 8972 } 8973 8974 void VPReplicateRecipe::execute(VPTransformState &State) { 8975 if (State.Instance) { // Generate a single instance. 8976 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 8977 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 8978 *State.Instance, IsPredicated, State); 8979 // Insert scalar instance packing it into a vector. 8980 if (AlsoPack && State.VF.isVector()) { 8981 // If we're constructing lane 0, initialize to start from poison. 
8982 if (State.Instance->Lane == 0) {
8983 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
8984 Value *Poison = PoisonValue::get(
8985 VectorType::get(getUnderlyingValue()->getType(), State.VF));
8986 State.set(this, Poison, State.Instance->Part);
8987 }
8988 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
8989 }
8990 return;
8991 }
8992
8993 // Generate scalar instances for all VF lanes of all UF parts, unless the
8994 // instruction is uniform, in which case generate only the first lane for each
8995 // of the UF parts.
8996 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
8997 assert((!State.VF.isScalable() || IsUniform) &&
8998 "Can't scalarize a scalable vector");
8999 for (unsigned Part = 0; Part < State.UF; ++Part)
9000 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9001 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9002 VPIteration(Part, Lane), IsPredicated,
9003 State);
9004 }
9005
9006 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9007 assert(State.Instance && "Branch on Mask works only on single instance.");
9008
9009 unsigned Part = State.Instance->Part;
9010 unsigned Lane = State.Instance->Lane;
9011
9012 Value *ConditionBit = nullptr;
9013 VPValue *BlockInMask = getMask();
9014 if (BlockInMask) {
9015 ConditionBit = State.get(BlockInMask, Part);
9016 if (ConditionBit->getType()->isVectorTy())
9017 ConditionBit = State.Builder.CreateExtractElement(
9018 ConditionBit, State.Builder.getInt32(Lane));
9019 } else // Block in mask is all-one.
9020 ConditionBit = State.Builder.getTrue();
9021
9022 // Replace the temporary unreachable terminator with a new conditional branch,
9023 // whose two destinations will be set later when they are created.
9024 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9025 assert(isa<UnreachableInst>(CurrentTerminator) &&
9026 "Expected to replace unreachable terminator with conditional branch.");
9027 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9028 CondBr->setSuccessor(0, nullptr);
9029 ReplaceInstWithInst(CurrentTerminator, CondBr);
9030 }
9031
9032 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9033 assert(State.Instance && "Predicated instruction PHI works per instance.");
9034 Instruction *ScalarPredInst =
9035 cast<Instruction>(State.get(getOperand(0), *State.Instance));
9036 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9037 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9038 assert(PredicatingBB && "Predicated block has no single predecessor.");
9039 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9040 "operand must be VPReplicateRecipe");
9041
9042 // By current pack/unpack logic we need to generate only a single phi node: if
9043 // a vector value for the predicated instruction exists at this point it means
9044 // the instruction has vector users only, and a phi for the vector value is
9045 // needed. In this case the recipe of the predicated instruction is marked to
9046 // also do that packing, thereby "hoisting" the insert-element sequence.
9047 // Otherwise, a phi node for the scalar value is needed.
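// For illustration, the scalar case below produces roughly:
//   %phi = phi <ty> [ poison, %predicating.bb ], [ %scalar.inst, %predicated.bb ]
// while the vector case instead phis the partially packed vector value.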
9048 unsigned Part = State.Instance->Part; 9049 if (State.hasVectorValue(getOperand(0), Part)) { 9050 Value *VectorValue = State.get(getOperand(0), Part); 9051 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9052 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9053 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9054 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9055 if (State.hasVectorValue(this, Part)) 9056 State.reset(this, VPhi, Part); 9057 else 9058 State.set(this, VPhi, Part); 9059 // NOTE: Currently we need to update the value of the operand, so the next 9060 // predicated iteration inserts its generated value in the correct vector. 9061 State.reset(getOperand(0), VPhi, Part); 9062 } else { 9063 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9064 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9065 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9066 PredicatingBB); 9067 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9068 if (State.hasScalarValue(this, *State.Instance)) 9069 State.reset(this, Phi, *State.Instance); 9070 else 9071 State.set(this, Phi, *State.Instance); 9072 // NOTE: Currently we need to update the value of the operand, so the next 9073 // predicated iteration inserts its generated value in the correct vector. 9074 State.reset(getOperand(0), Phi, *State.Instance); 9075 } 9076 } 9077 9078 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9079 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9080 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 9081 StoredValue ? nullptr : getVPValue(), 9082 getAddr(), StoredValue, getMask()); 9083 } 9084 9085 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9086 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9087 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9088 // for predication. 9089 static ScalarEpilogueLowering getScalarEpilogueLowering( 9090 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9091 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9092 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9093 LoopVectorizationLegality &LVL) { 9094 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9095 // don't look at hints or options, and don't request a scalar epilogue. 9096 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9097 // LoopAccessInfo (due to code dependency and not being able to reliably get 9098 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9099 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9100 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9101 // back to the old way and vectorize with versioning when forced. See D81345.) 
9102 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9103 PGSOQueryType::IRPass) && 9104 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9105 return CM_ScalarEpilogueNotAllowedOptSize; 9106 9107 // 2) If set, obey the directives 9108 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9109 switch (PreferPredicateOverEpilogue) { 9110 case PreferPredicateTy::ScalarEpilogue: 9111 return CM_ScalarEpilogueAllowed; 9112 case PreferPredicateTy::PredicateElseScalarEpilogue: 9113 return CM_ScalarEpilogueNotNeededUsePredicate; 9114 case PreferPredicateTy::PredicateOrDontVectorize: 9115 return CM_ScalarEpilogueNotAllowedUsePredicate; 9116 }; 9117 } 9118 9119 // 3) If set, obey the hints 9120 switch (Hints.getPredicate()) { 9121 case LoopVectorizeHints::FK_Enabled: 9122 return CM_ScalarEpilogueNotNeededUsePredicate; 9123 case LoopVectorizeHints::FK_Disabled: 9124 return CM_ScalarEpilogueAllowed; 9125 }; 9126 9127 // 4) if the TTI hook indicates this is profitable, request predication. 9128 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9129 LVL.getLAI())) 9130 return CM_ScalarEpilogueNotNeededUsePredicate; 9131 9132 return CM_ScalarEpilogueAllowed; 9133 } 9134 9135 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9136 // If Values have been set for this Def return the one relevant for \p Part. 9137 if (hasVectorValue(Def, Part)) 9138 return Data.PerPartOutput[Def][Part]; 9139 9140 if (!hasScalarValue(Def, {Part, 0})) { 9141 Value *IRV = Def->getLiveInIRValue(); 9142 Value *B = ILV->getBroadcastInstrs(IRV); 9143 set(Def, B, Part); 9144 return B; 9145 } 9146 9147 Value *ScalarValue = get(Def, {Part, 0}); 9148 // If we aren't vectorizing, we can just copy the scalar map values over 9149 // to the vector map. 9150 if (VF.isScalar()) { 9151 set(Def, ScalarValue, Part); 9152 return ScalarValue; 9153 } 9154 9155 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9156 bool IsUniform = RepR && RepR->isUniform(); 9157 9158 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9159 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 9160 9161 // Set the insert point after the last scalarized instruction. This 9162 // ensures the insertelement sequence will directly follow the scalar 9163 // definitions. 9164 auto OldIP = Builder.saveIP(); 9165 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 9166 Builder.SetInsertPoint(&*NewIP); 9167 9168 // However, if we are vectorizing, we need to construct the vector values. 9169 // If the value is known to be uniform after vectorization, we can just 9170 // broadcast the scalar value corresponding to lane zero for each unroll 9171 // iteration. Otherwise, we construct the vector values using 9172 // insertelement instructions. Since the resulting vectors are stored in 9173 // State, we will only generate the insertelements once. 9174 Value *VectorValue = nullptr; 9175 if (IsUniform) { 9176 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 9177 set(Def, VectorValue, Part); 9178 } else { 9179 // Initialize packing with insertelements to start from undef. 
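// Illustrative shape of the packing sequence generated for one part (the lane
// names are made up for the example):
//   %pack.0 = insertelement <VF x ty> poison,  ty %lane0, i32 0
//   %pack.1 = insertelement <VF x ty> %pack.0, ty %lane1, i32 1
//   ... and so on up to lane VF-1.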
9180 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 9181 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 9182 set(Def, Undef, Part); 9183 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 9184 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 9185 VectorValue = get(Def, Part); 9186 } 9187 Builder.restoreIP(OldIP); 9188 return VectorValue; 9189 } 9190 9191 // Process the loop in the VPlan-native vectorization path. This path builds 9192 // VPlan upfront in the vectorization pipeline, which allows to apply 9193 // VPlan-to-VPlan transformations from the very beginning without modifying the 9194 // input LLVM IR. 9195 static bool processLoopInVPlanNativePath( 9196 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 9197 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 9198 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 9199 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 9200 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 9201 9202 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 9203 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 9204 return false; 9205 } 9206 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 9207 Function *F = L->getHeader()->getParent(); 9208 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 9209 9210 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9211 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 9212 9213 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 9214 &Hints, IAI); 9215 // Use the planner for outer loop vectorization. 9216 // TODO: CM is not used at this point inside the planner. Turn CM into an 9217 // optional argument if we don't need it in the future. 9218 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 9219 9220 // Get user vectorization factor. 9221 ElementCount UserVF = Hints.getWidth(); 9222 9223 // Plan how to best vectorize, return the best VF and its cost. 9224 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 9225 9226 // If we are stress testing VPlan builds, do not attempt to generate vector 9227 // code. Masked vector code generation support will follow soon. 9228 // Also, do not attempt to vectorize if no vector code will be produced. 9229 if (VPlanBuildStressTest || EnableVPlanPredication || 9230 VectorizationFactor::Disabled() == VF) 9231 return false; 9232 9233 LVP.setBestPlan(VF.Width, 1); 9234 9235 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 9236 &CM, BFI, PSI); 9237 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 9238 << L->getHeader()->getParent()->getName() << "\"\n"); 9239 LVP.executePlan(LB, DT); 9240 9241 // Mark the loop as already vectorized to avoid vectorizing again. 9242 Hints.setAlreadyVectorized(); 9243 9244 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9245 return true; 9246 } 9247 9248 // Emit a remark if there are stores to floats that required a floating point 9249 // extension. If the vectorized loop was generated with floating point there 9250 // will be a performance penalty from the conversion overhead and the change in 9251 // the vector width. 
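// For example (illustrative), a loop containing
//   %e = fpext half %h to float
//   store float %e, float* %p
// stores a float that required an extension, so the traversal below reaches
// the fpext and emits the remark.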
9252 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 9253 SmallVector<Instruction *, 4> Worklist; 9254 for (BasicBlock *BB : L->getBlocks()) { 9255 for (Instruction &Inst : *BB) { 9256 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 9257 if (S->getValueOperand()->getType()->isFloatTy()) 9258 Worklist.push_back(S); 9259 } 9260 } 9261 } 9262 9263 // Traverse the floating point stores upwards searching, for floating point 9264 // conversions. 9265 SmallPtrSet<const Instruction *, 4> Visited; 9266 SmallPtrSet<const Instruction *, 4> EmittedRemark; 9267 while (!Worklist.empty()) { 9268 auto *I = Worklist.pop_back_val(); 9269 if (!L->contains(I)) 9270 continue; 9271 if (!Visited.insert(I).second) 9272 continue; 9273 9274 // Emit a remark if the floating point store required a floating 9275 // point conversion. 9276 // TODO: More work could be done to identify the root cause such as a 9277 // constant or a function return type and point the user to it. 9278 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 9279 ORE->emit([&]() { 9280 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 9281 I->getDebugLoc(), L->getHeader()) 9282 << "floating point conversion changes vector width. " 9283 << "Mixed floating point precision requires an up/down " 9284 << "cast that will negatively impact performance."; 9285 }); 9286 9287 for (Use &Op : I->operands()) 9288 if (auto *OpI = dyn_cast<Instruction>(Op)) 9289 Worklist.push_back(OpI); 9290 } 9291 } 9292 9293 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 9294 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 9295 !EnableLoopInterleaving), 9296 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 9297 !EnableLoopVectorization) {} 9298 9299 bool LoopVectorizePass::processLoop(Loop *L) { 9300 assert((EnableVPlanNativePath || L->isInnermost()) && 9301 "VPlan-native path is not enabled. Only process inner loops."); 9302 9303 #ifndef NDEBUG 9304 const std::string DebugLocStr = getDebugLocString(L); 9305 #endif /* NDEBUG */ 9306 9307 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 9308 << L->getHeader()->getParent()->getName() << "\" from " 9309 << DebugLocStr << "\n"); 9310 9311 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 9312 9313 LLVM_DEBUG( 9314 dbgs() << "LV: Loop hints:" 9315 << " force=" 9316 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 9317 ? "disabled" 9318 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 9319 ? "enabled" 9320 : "?")) 9321 << " width=" << Hints.getWidth() 9322 << " unroll=" << Hints.getInterleave() << "\n"); 9323 9324 // Function containing loop 9325 Function *F = L->getHeader()->getParent(); 9326 9327 // Looking at the diagnostic output is the only way to determine if a loop 9328 // was vectorized (other than looking at the IR or machine code), so it 9329 // is important to generate an optimization remark for each loop. Most of 9330 // these messages are generated as OptimizationRemarkAnalysis. Remarks 9331 // generated as OptimizationRemark and OptimizationRemarkMissed are 9332 // less verbose reporting vectorized loops and unvectorized loops that may 9333 // benefit from vectorization, respectively. 9334 9335 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 9336 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 9337 return false; 9338 } 9339 9340 PredicatedScalarEvolution PSE(*SE, *L); 9341 9342 // Check if it is legal to vectorize the loop. 
9343 LoopVectorizationRequirements Requirements(*ORE); 9344 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 9345 &Requirements, &Hints, DB, AC, BFI, PSI); 9346 if (!LVL.canVectorize(EnableVPlanNativePath)) { 9347 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 9348 Hints.emitRemarkWithHints(); 9349 return false; 9350 } 9351 9352 // Check the function attributes and profiles to find out if this function 9353 // should be optimized for size. 9354 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9355 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 9356 9357 // Entrance to the VPlan-native vectorization path. Outer loops are processed 9358 // here. They may require CFG and instruction level transformations before 9359 // even evaluating whether vectorization is profitable. Since we cannot modify 9360 // the incoming IR, we need to build VPlan upfront in the vectorization 9361 // pipeline. 9362 if (!L->isInnermost()) 9363 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 9364 ORE, BFI, PSI, Hints); 9365 9366 assert(L->isInnermost() && "Inner loop expected."); 9367 9368 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 9369 // count by optimizing for size, to minimize overheads. 9370 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 9371 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 9372 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 9373 << "This loop is worth vectorizing only if no scalar " 9374 << "iteration overheads are incurred."); 9375 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 9376 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 9377 else { 9378 LLVM_DEBUG(dbgs() << "\n"); 9379 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 9380 } 9381 } 9382 9383 // Check the function attributes to see if implicit floats are allowed. 9384 // FIXME: This check doesn't seem possibly correct -- what if the loop is 9385 // an integer loop and the vector instructions selected are purely integer 9386 // vector instructions? 9387 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 9388 reportVectorizationFailure( 9389 "Can't vectorize when the NoImplicitFloat attribute is used", 9390 "loop not vectorized due to NoImplicitFloat attribute", 9391 "NoImplicitFloat", ORE, L); 9392 Hints.emitRemarkWithHints(); 9393 return false; 9394 } 9395 9396 // Check if the target supports potentially unsafe FP vectorization. 9397 // FIXME: Add a check for the type of safety issue (denormal, signaling) 9398 // for the target we're vectorizing for, to make sure none of the 9399 // additional fp-math flags can help. 9400 if (Hints.isPotentiallyUnsafe() && 9401 TTI->isFPVectorizationPotentiallyUnsafe()) { 9402 reportVectorizationFailure( 9403 "Potentially unsafe FP op prevents vectorization", 9404 "loop not vectorized due to unsafe FP support.", 9405 "UnsafeFP", ORE, L); 9406 Hints.emitRemarkWithHints(); 9407 return false; 9408 } 9409 9410 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 9411 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 9412 9413 // If an override option has been passed in for interleaved accesses, use it. 9414 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 9415 UseInterleaved = EnableInterleavedMemAccesses; 9416 9417 // Analyze interleaved memory accesses. 
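// Interleave groups arise from strided accesses such as (illustrative):
//   for (i = 0; i < n; i++) { sum += A[2*i] + A[2*i+1]; }
// where the two loads form a single factor-2 load group.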
9418 if (UseInterleaved) {
9419 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9420 }
9421
9422 // Use the cost model.
9423 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9424 F, &Hints, IAI);
9425 CM.collectValuesToIgnore();
9426
9427 // Use the planner for vectorization.
9428 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
9429
9430 // Get user vectorization factor and interleave count.
9431 ElementCount UserVF = Hints.getWidth();
9432 unsigned UserIC = Hints.getInterleave();
9433
9434 // Plan how to best vectorize, return the best VF and its cost.
9435 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9436
9437 VectorizationFactor VF = VectorizationFactor::Disabled();
9438 unsigned IC = 1;
9439
9440 if (MaybeVF) {
9441 VF = *MaybeVF;
9442 // Select the interleave count.
9443 IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
9444 }
9445
9446 // Identify the diagnostic messages that should be produced.
9447 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9448 bool VectorizeLoop = true, InterleaveLoop = true;
9449 if (Requirements.doesNotMeet(F, L, Hints)) {
9450 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
9451 "requirements.\n");
9452 Hints.emitRemarkWithHints();
9453 return false;
9454 }
9455
9456 if (VF.Width.isScalar()) {
9457 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9458 VecDiagMsg = std::make_pair(
9459 "VectorizationNotBeneficial",
9460 "the cost-model indicates that vectorization is not beneficial");
9461 VectorizeLoop = false;
9462 }
9463
9464 if (!MaybeVF && UserIC > 1) {
9465 // Tell the user interleaving was avoided up-front, despite being explicitly
9466 // requested.
9467 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9468 "interleaving should be avoided up front\n");
9469 IntDiagMsg = std::make_pair(
9470 "InterleavingAvoided",
9471 "Ignoring UserIC, because interleaving was avoided up front");
9472 InterleaveLoop = false;
9473 } else if (IC == 1 && UserIC <= 1) {
9474 // Tell the user interleaving is not beneficial.
9475 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9476 IntDiagMsg = std::make_pair(
9477 "InterleavingNotBeneficial",
9478 "the cost-model indicates that interleaving is not beneficial");
9479 InterleaveLoop = false;
9480 if (UserIC == 1) {
9481 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9482 IntDiagMsg.second +=
9483 " and is explicitly disabled or interleave count is set to 1";
9484 }
9485 } else if (IC > 1 && UserIC == 1) {
9486 // Tell the user interleaving is beneficial, but is explicitly disabled.
9487 LLVM_DEBUG(
9488 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9489 IntDiagMsg = std::make_pair(
9490 "InterleavingBeneficialButDisabled",
9491 "the cost-model indicates that interleaving is beneficial "
9492 "but is explicitly disabled or interleave count is set to 1");
9493 InterleaveLoop = false;
9494 }
9495
9496 // Override IC if user provided an interleave count.
9497 IC = UserIC > 0 ? UserIC : IC;
9498
9499 // Emit diagnostic messages, if any.
9500 const char *VAPassName = Hints.vectorizeAnalysisPassName();
9501 if (!VectorizeLoop && !InterleaveLoop) {
9502 // Do not vectorize or interleave the loop.
9503 ORE->emit([&]() { 9504 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 9505 L->getStartLoc(), L->getHeader()) 9506 << VecDiagMsg.second; 9507 }); 9508 ORE->emit([&]() { 9509 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 9510 L->getStartLoc(), L->getHeader()) 9511 << IntDiagMsg.second; 9512 }); 9513 return false; 9514 } else if (!VectorizeLoop && InterleaveLoop) { 9515 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 9516 ORE->emit([&]() { 9517 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 9518 L->getStartLoc(), L->getHeader()) 9519 << VecDiagMsg.second; 9520 }); 9521 } else if (VectorizeLoop && !InterleaveLoop) { 9522 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 9523 << ") in " << DebugLocStr << '\n'); 9524 ORE->emit([&]() { 9525 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 9526 L->getStartLoc(), L->getHeader()) 9527 << IntDiagMsg.second; 9528 }); 9529 } else if (VectorizeLoop && InterleaveLoop) { 9530 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 9531 << ") in " << DebugLocStr << '\n'); 9532 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 9533 } 9534 9535 LVP.setBestPlan(VF.Width, IC); 9536 9537 using namespace ore; 9538 bool DisableRuntimeUnroll = false; 9539 MDNode *OrigLoopID = L->getLoopID(); 9540 9541 if (!VectorizeLoop) { 9542 assert(IC > 1 && "interleave count should not be 1 or 0"); 9543 // If we decided that it is not legal to vectorize the loop, then 9544 // interleave it. 9545 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM, 9546 BFI, PSI); 9547 LVP.executePlan(Unroller, DT); 9548 9549 ORE->emit([&]() { 9550 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 9551 L->getHeader()) 9552 << "interleaved loop (interleaved count: " 9553 << NV("InterleaveCount", IC) << ")"; 9554 }); 9555 } else { 9556 // If we decided that it is *legal* to vectorize the loop, then do it. 9557 9558 // Consider vectorizing the epilogue too if it's profitable. 9559 VectorizationFactor EpilogueVF = 9560 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 9561 if (EpilogueVF.Width.isVector()) { 9562 9563 // The first pass vectorizes the main loop and creates a scalar epilogue 9564 // to be vectorized by executing the plan (potentially with a different 9565 // factor) again shortly afterwards. 9566 EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC, 9567 EpilogueVF.Width.getKnownMinValue(), 1); 9568 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI, 9569 &LVL, &CM, BFI, PSI); 9570 9571 LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF); 9572 LVP.executePlan(MainILV, DT); 9573 ++LoopsVectorized; 9574 9575 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 9576 formLCSSARecursively(*L, *DT, LI, SE); 9577 9578 // Second pass vectorizes the epilogue and adjusts the control flow 9579 // edges from the first pass. 
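// Roughly, after both passes the generated code has the shape:
//   main vector loop (MainLoopVF x MainLoopUF)
//     -> epilogue vector loop (EpilogueVF x EpilogueUF)
//       -> scalar remainder loop
// (a sketch of the intended structure, not the literal CFG).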
      LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
      EPI.MainLoopVF = EPI.EpilogueVF;
      EPI.MainLoopUF = EPI.EpilogueUF;
      EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                               ORE, EPI, &LVL, &CM, BFI, PSI);
      LVP.executePlan(EpilogILV, DT);
      ++LoopsEpilogueVectorized;

      if (!MainILV.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    } else {
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                             &LVL, &CM, BFI, PSI);
      LVP.executePlan(LB, DT);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of a scalar loop when there
      // are no runtime checks about strides and memory. A scalar loop that is
      // rarely used is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    }

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
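  // In the default (non-VPlan-native) configuration this queues innermost
  // loops only; outer loops are considered only when the VPlan-native path is
  // enabled and they carry explicit vectorization hints.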
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
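
// Usage sketch (illustrative; not part of the pass itself): the new-PM entry
// point above is what runs when the vectorizer is requested by name, e.g.
//
//   opt -passes=loop-vectorize -S input.ll
//
// or when the pass is added to a FunctionPassManager programmatically:
//
//   FunctionPassManager FPM;
//   FPM.addPass(LoopVectorizePass());
//
// The default optimization pipelines schedule this pass late, after loop
// canonicalization (loop-simplify/LCSSA), which runImpl also enforces itself.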