//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
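// For example (a sketch, with vectorization factor VF = 4), the scalar loop
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
// becomes a loop that processes four elements per iteration:
//   for (i = 0; i < n; i += 4)
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>;
// with a scalar remainder (epilogue) loop handling the final n % 4 elements.
//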
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 202 // that predication is preferred, and this lists all options. I.e., the 203 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 204 // and predicate the instructions accordingly. 

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
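
// For instance, a regression test that wants target-independent cost-model
// behavior might invoke something like
//   opt -passes=loop-vectorize -force-target-instruction-cost=1 ...
// (an illustrative, hypothetical invocation; actual tests pick whatever flags
// they need).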
Mostly " 274 "useful for getting consistent testing.")); 275 276 static cl::opt<bool> ForceTargetSupportsScalableVectors( 277 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 278 cl::desc( 279 "Pretend that scalable vectors are supported, even if the target does " 280 "not support them. This flag should only be used for testing.")); 281 282 static cl::opt<unsigned> SmallLoopCost( 283 "small-loop-cost", cl::init(20), cl::Hidden, 284 cl::desc( 285 "The cost of a loop that is considered 'small' by the interleaver.")); 286 287 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 288 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 289 cl::desc("Enable the use of the block frequency analysis to access PGO " 290 "heuristics minimizing code growth in cold regions and being more " 291 "aggressive in hot regions.")); 292 293 // Runtime interleave loops for load/store throughput. 294 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 295 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 296 cl::desc( 297 "Enable runtime interleaving until load/store ports are saturated")); 298 299 /// Interleave small loops with scalar reductions. 300 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 301 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 302 cl::desc("Enable interleaving for loops with small iteration counts that " 303 "contain scalar reductions to expose ILP.")); 304 305 /// The number of stores in a loop that are allowed to need predication. 306 static cl::opt<unsigned> NumberOfStoresToPredicate( 307 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 308 cl::desc("Max number of stores to be predicated behind an if.")); 309 310 static cl::opt<bool> EnableIndVarRegisterHeur( 311 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 312 cl::desc("Count the induction variable only once when interleaving")); 313 314 static cl::opt<bool> EnableCondStoresVectorization( 315 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 316 cl::desc("Enable if predication of stores during vectorization.")); 317 318 static cl::opt<unsigned> MaxNestedScalarReductionIC( 319 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 320 cl::desc("The maximum interleave count to use when interleaving a scalar " 321 "reduction in a nested loop.")); 322 323 static cl::opt<bool> 324 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 325 cl::Hidden, 326 cl::desc("Prefer in-loop vector reductions, " 327 "overriding the targets preference.")); 328 329 static cl::opt<bool> PreferPredicatedReductionSelect( 330 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 331 cl::desc( 332 "Prefer predicating a reduction operation over an after loop select.")); 333 334 cl::opt<bool> EnableVPlanNativePath( 335 "enable-vplan-native-path", cl::init(false), cl::Hidden, 336 cl::desc("Enable VPlan-native vectorization path with " 337 "support for outer loop vectorization.")); 338 339 // FIXME: Remove this switch once we have divergence analysis. Currently we 340 // assume divergent non-backedge branches when this switch is true. 341 cl::opt<bool> EnableVPlanPredication( 342 "enable-vplan-predication", cl::init(false), cl::Hidden, 343 cl::desc("Enable VPlan-native vectorization path predicator with " 344 "support for outer loop vectorization.")); 345 346 // This flag enables the stress testing of the VPlan H-CFG construction in the 347 // VPlan-native vectorization path. 

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
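
// As a concrete illustration (assuming a typical X86 data layout): x86_fp80
// has a store size of 80 bits but an allocation size of 96 or 128 bits, so an
// array of x86_fp80 is not bitcast-compatible with <VF x x86_fp80> and the
// type is irregular; i32, whose store and allocation sizes are both 32 bits,
// is regular for any VF.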

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;
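
  // For instance, with UF = 2 and VF = 4, an i32 value from the original loop
  // is represented by a VectorParts holding two <4 x i32> values, one per
  // unrolled part.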

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPValue *StartV, VPValue *Def,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);
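
  // For example (integer case, a sketch): with VF = 4, Val = splat(%x),
  // StartIdx = 0 and Step = %s, the result is
  // <%x, %x + %s, %x + 2*%s, %x + 3*%s>.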

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and
  /// createVectorIntOrFpInductionPHI()). In the latter case \p EntryVal is a
  /// TruncInst and we must not record anything for that IV, but it's
  /// error-prone to expect callers of this routine to care about that, hence
  /// this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;
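
  // Worked example of the integer case: with StartValue = 10, StepValue = 3
  // and Index = 4, the transformed index is 10 + 4 * 3 = 22.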

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};
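
// Illustrative driver flow (a sketch; the real logic lives in
// LoopVectorizePass::processLoop further down in this file): once the planner
// has settled on a width VF.Width and an interleave count IC, it constructs an
// InnerLoopVectorizer and executes the selected VPlan through it, roughly:
//   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
//                          &LVL, &CM, BFI, PSI);
//   LVP.executePlan(LB, DT);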

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};
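
// For example, EpilogueLoopVectorizationInfo EPI(16, 2, 8, 1) describes a main
// vector loop with VF = 16 and UF = 2 followed by a vectorized epilogue with
// VF = 8 and UF = 1 (illustrative values; in practice the cost model picks
// them).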

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationLegality *LVL,
                                 llvm::LoopVectorizationCostModel *CM,
                                 BlockFrequencyInfo *BFI,
                                 ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
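
// For example, with Step = 2 and a fixed VF of 4, createStepForVF returns the
// constant 8; with a scalable VF of <vscale x 4> it returns 8 * vscale,
// materialized through CreateVScale.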

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };
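
  // For instance (a sketch): a load of A[i] in the loop is typically
  // CM_Widen (stride +1), a load of A[n - i] is CM_Widen_Reverse (stride -1),
  // and a load of A[B[i]] can only be handled as CM_GatherScatter or
  // CM_Scalarize, whichever the cost model finds cheaper.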

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      RecurrenceDescriptor RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I,
                               ElementCount VF = ElementCount::getFixed(1));

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue() const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }
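
  // For example (an illustrative sketch), for an integer sum reduction
  //   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
  //   %l = load i32, ...
  //   %sum.next = add i32 %sum, %l
  // an in-loop reduction maps %sum to the chain [%sum.next], so the add can
  // later be emitted as a reduction inside the loop instead of as a wide
  // vector add followed by a single reduction after the loop.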

  /// Estimate the cost of an intrinsic call instruction CI if it were
  /// vectorized with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);

  /// Estimate the cost of a call instruction CI if it were vectorized with
  /// factor VF. Return the cost of the instruction, including scalarization
  /// overhead if it's needed. The flag NeedToScalarize shows if the call needs
  /// to be scalarized, i.e. either the vector version isn't available, or it
  /// is too expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize);

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, a power-of-2 larger
  /// than zero. One is returned if vectorization should best be avoided due
  /// to cost.
  ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
                                    ElementCount UserVF);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(ElementCount VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
                                          Type *VectorTy,
                                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup to the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  };

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};

} // end namespace llvm

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
// vector length information is not provided, vectorization is not considered
// explicit. Interleave hints are not allowed either. These limitations will be
// relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}
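
// For example (illustrative), an outer loop annotated as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[j];
// passes this check, while the same nest without vectorize_width(4) does not,
// since the vector length must be given explicitly.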

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl
        .runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, GetLAA,
                 *ORE, PSI)
        .MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will stay
  // inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}
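
// As a sketch of the emitted IR (for VF = 4 and an i32 value %v; names are
// illustrative), CreateVectorSplat expands to an insertelement followed by a
// zero-mask shufflevector:
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %v, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison,
//                                    <4 x i32> zeroinitializer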

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Value *Start,
    Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
    VPTransformState &State) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Value *ConstVF =
      getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
  Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it
  //        doesn't handle a constant vector splat.
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    State.set(Def, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
                                          State, Part);

    LastInduction = cast<Instruction>(addFastMathFlag(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}
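
// For example (an illustrative sketch, names not taken from the code): for an
// i32 induction with start 0, step 1, VF = 4 and UF = 2, the vector loop gets
//   %vec.ind      = phi <4 x i32> [ <0,1,2,3>, %ph ], [ %vec.ind.next, %latch ]
//   %step.add     = add <4 x i32> %vec.ind, <4,4,4,4>   ; value for part 1
//   %vec.ind.next = add <4 x i32> %step.add, <4,4,4,4>  ; feeds the phi
// so part 0 uses %vec.ind and part 1 uses %step.add.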

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
    unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
  // induction update chain itself.
  if (Lane < UINT_MAX)
    State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
  else
    State.set(CastDef, VectorLoopVal, Part);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
                                                TruncInst *Trunc, VPValue *Def,
                                                VPValue *CastDef,
                                                VPTransformState &State) {
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");

  auto II = Legal->getInductionVars().find(IV);
  assert(II != Legal->getInductionVars().end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  auto CreateStepValue = [&](const SCEV *Step) -> Value * {
    assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
           "Induction step should be loop invariant");
    if (PSE.getSE()->isSCEVable(IV->getType())) {
      SCEVExpander Exp(*PSE.getSE(), DL, "induction");
      return Exp.expandCodeFor(Step, Step->getType(),
                               LoopVectorPreHeader->getTerminator());
    }
    return cast<SCEVUnknown>(Step)->getValue();
  };

  // The scalar value to broadcast. This is derived from the canonical
  // induction variable. If a truncation type is given, truncate the canonical
  // induction variable and step. Otherwise, derive these values from the
  // induction descriptor.
  auto CreateScalarIV = [&](Value *&Step) -> Value * {
    Value *ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
    return ScalarIV;
  };

  // Create the vector values from the scalar IV, in the absence of creating a
  // vector IV.
  auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      Value *EntryPart =
          getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
                        ID.getInductionOpcode());
      State.set(Def, EntryPart, Part);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
      recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
                                            State, Part);
    }
  };

  // Now do the actual transformations, and start with creating the step value.
  Value *Step = CreateStepValue(ID.getStep());
  if (VF.isZero() || VF.isScalar()) {
    Value *ScalarIV = CreateScalarIV(Step);
    CreateSplatIV(ScalarIV, Step);
    return;
  }

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = needsScalarInduction(EntryVal);
  if (!NeedsScalarIV) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    return;
  }

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (!shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    Value *ScalarIV = CreateScalarIV(Step);
    // Create scalar steps that can be used by instructions we will later
    // scalarize. Note that the addition of the scalar steps will not increase
    // the number of instructions in the loop in the common case prior to
    // InstCombine. We will be trading one vector extract for each scalar step.
    buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
    return;
  }

  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorised IV. Except when we tail-fold, then the splat IV feeds the
  // predicate used by the masked loads/stores.
  Value *ScalarIV = CreateScalarIV(Step);
  if (!Cost->isScalarEpilogueAllowed())
    CreateSplatIV(ScalarIV, Step);
  buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
  auto *ValVTy = cast<FixedVectorType>(Val->getType());
  int VLen = ValVTy->getNumElements();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers starting at StartIdx.
    for (int i = 0; i < VLen; ++i)
      Indices.push_back(ConstantInt::get(STy, StartIdx + i));

    // Add the consecutive indices to the vector value.
    Constant *Cv = ConstantVector::get(Indices);
    assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(Cv, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers starting at StartIdx.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);

  Step = Builder.CreateVectorSplat(VLen, Step);

  // Floating point operations had to be 'fast' to enable the induction.
  FastMathFlags Flags;
  Flags.setFast();

  Value *MulOp = Builder.CreateFMul(Cv, Step);
  if (isa<Instruction>(MulOp))
    // Have to check, MulOp may be a constant.
    cast<Instruction>(MulOp)->setFastMathFlags(Flags);

  Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  if (isa<Instruction>(BOp))
    cast<Instruction>(BOp)->setFastMathFlags(Flags);
  return BOp;
}
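
// For example (illustrative), with VLen = 4, StartIdx = 4 (unroll part 1 for
// VF = 4) and an integer step %s, the integer path above multiplies the
// constant vector <4, 5, 6, 7> by the splatted step and adds it to Val, so
// lane L of the result is Val[L] + (StartIdx + L) * %s. This is exactly how
// CreateSplatIV materializes the IV values for each unroll part.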

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Instruction *EntryVal,
                                           const InductionDescriptor &ID,
                                           VPValue *Def, VPValue *CastDef,
                                           VPTransformState &State) {
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  unsigned Lanes =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
          ? 1
          : VF.getKnownMinValue();
  assert((!VF.isScalable() || Lanes == 1) &&
         "Should never scalarize a scalable vector");
  // Compute the scalar steps and save the results in State.
  for (unsigned Part = 0; Part < UF; ++Part) {
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                         ScalarIVTy->getScalarSizeInBits());
      Value *StartIdx =
          createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
      if (ScalarIVTy->isFloatingPointTy())
        StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
      StartIdx = addFastMathFlag(Builder.CreateBinOp(
          AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane)));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
      auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
      State.set(Def, Add, VPIteration(Part, Lane));
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part, Lane);
    }
  }
}
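
// For example (illustrative), for a non-uniform integer IV %iv with step %s,
// VF = 4 and UF = 2, the loops above emit the eight scalar values
//   %iv + (Part * 4 + Lane) * %s   for Part in {0, 1}, Lane in {0, ..., 3}
// whereas an IV that is uniform-after-vectorization (e.g. a pointer operand
// of a consecutive access) only gets the Lane 0 value of each part.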

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane));
  State.set(Def, VectorValue, Instance.Part);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  assert(!VF.isScalable() && "Cannot reverse scalable vectors");
  SmallVector<int, 8> ShuffleMask;
  for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
    ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);

  return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
}
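
// For example, with VF = 4 the mask built above is <3, 2, 1, 0>, so the
// emitted shuffle looks like (second operand shown for illustration;
// CreateShuffleVector fills it in):
//   %reverse = shufflevector <4 x i32> %vec, <4 x i32> poison,
//                            <4 x i32> <i32 3, i32 2, i32 1, i32 0>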

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];             // Member of index 0
//     G = Pic[i+1];           // Member of index 1
//     B = Pic[i+2];           // Member of index 2
//     ...                     // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ...                     // do something to R, G, B
//     Pic[i]   = R;           // Member of index 0
//     Pic[i+1] = G;           // Member of index 1
//     Pic[i+2] = B;           // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//       <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>  ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec            ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getMemInstValueType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  assert(!VF.isScalable() &&
         "scalable vector reverse operation is not implemented");
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(Builder, AddrPart);

    // Note that the current instruction could be at any member index, so the
    // address needs to be adjusted to the member of index 0.
    //
    // E.g. a = A[i+1];     // Member of index 1 (Current instruction)
    //      b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a;     // Member of index 1
    //      A[i]   = b;     // Member of index 0
    //      A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = reverseVector(StridedVec);

        State.set(VPDefs[J], StridedVec, Part);
      }
      ++J;
    }
    return;
  }
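
  // Tying the load path above back to the R/G/B example in the function
  // comment: for member index 1 of a factor-3 group with VF = 4,
  // createStrideMask(1, 3, 4) yields <1, 4, 7, 10>, exactly the %G.vec
  // shuffle shown there.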

  // The sub vector type for the current instruction.
  assert(!VF.isScalable() && "VF is assumed to be non scalable.");
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
      assert(Group->getMember(i) &&
             "Fail to get a member from an interleaved store group");

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask) {
      Value *BlockInMaskPart = State.get(BlockInMask, Part);
      Value *ShuffledMask = Builder.CreateShuffleVector(
          BlockInMaskPart,
          createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
          "interleaved.mask");
      NewStoreInstr = Builder.CreateMaskedStore(
          IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}
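
// A note on the "interleaved.mask" shuffles used in both paths above: the
// per-iteration block mask has one bit per original iteration, so it is
// widened to one bit per interleaved element by replication. For example,
// createReplicatedMask(3, 4) is <0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3>, which
// repeats each of the four mask lanes three times for a factor-3 group.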

void InnerLoopVectorizer::vectorizeMemoryInstruction(
    Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
    VPValue *StoredValue, VPValue *BlockInMask) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");
  assert((!SI || StoredValue) && "No stored value provided for widened store");
  assert((!LI || !StoredValue) && "Stored value provided for widened load");

  LoopVectorizationCostModel::InstWidening Decision =
      Cost->getWideningDecision(Instr, VF);
  assert((Decision == LoopVectorizationCostModel::CM_Widen ||
          Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
          Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
         "CM decision is not to widen the memory instruction");

  Type *ScalarDataTy = getMemInstValueType(Instr);

  auto *DataTy = VectorType::get(ScalarDataTy, VF);
  const Align Alignment = getLoadStoreAlignment(Instr);

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
  bool ConsecutiveStride =
      Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
  bool CreateGatherScatter =
      (Decision == LoopVectorizationCostModel::CM_GatherScatter);

  // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
  // gather/scatter. Otherwise Decision should have been to Scalarize.
  assert((ConsecutiveStride || CreateGatherScatter) &&
         "The instruction should be scalarized");
  (void)ConsecutiveStride;

  VectorParts BlockInMaskParts(UF);
  bool isMaskRequired = BlockInMask;
  if (isMaskRequired)
    for (unsigned Part = 0; Part < UF; ++Part)
      BlockInMaskParts[Part] = State.get(BlockInMask, Part);

  const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
    // Calculate the pointer for the specific unroll-part.
    GetElementPtrInst *PartPtr = nullptr;

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
      InBounds = gep->isInBounds();

    if (Reverse) {
      assert(!VF.isScalable() &&
             "Reversing vectors is not yet supported for scalable vectors.");

      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
          ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
      PartPtr->setIsInBounds(InBounds);
      PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
          ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
      PartPtr->setIsInBounds(InBounds);
      if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
        BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
    } else {
      Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
      PartPtr->setIsInBounds(InBounds);
    }

    unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
    return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  };

  // Handle Stores:
  if (SI) {
    setDebugLocFromInst(Builder, SI);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      Value *StoredVal = State.get(StoredValue, Part);
      if (CreateGatherScatter) {
        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
        Value *VectorGep = State.get(Addr, Part);
        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
                                            MaskPart);
      } else {
        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal = reverseVector(StoredVal);
          // We don't want to update the value in the map as it might be used
          // in another expression. So don't call resetVectorValue(StoredVal).
        }
        auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
        if (isMaskRequired)
          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
                                            BlockInMaskParts[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(Addr, Part);
      NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      addMetadata(NewLI, LI);
    } else {
      auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
            "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load itself, but record the reverse shuffle, when
      // present, as the vector value.
      addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = reverseVector(NewLI);
    }

    State.set(Def, NewLI, Part);
  }
}
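
// To illustrate the reverse-consecutive addressing above (illustrative, for
// VF = 4, UF = 2, loading a[i] with i counting down): part 0 reads from
// &a[i] - 3 and part 1 from &a[i] - 7, i.e. elements <i-3, i-2, i-1, i> and
// <i-7, i-6, i-5, i-4>; each wide load is then followed by a reverse shuffle
// so the lanes appear in original iteration order (i, i-1, i-2, ...). The
// block mask, when required, is reversed the same way.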
void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
                                               VPUser &User,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
                               Builder.GetInsertPoint());
  // Replace the operands of the cloned instructions with their scalar
  // equivalents in the new loop.
  for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
    auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
    auto InputInstance = Instance;
    if (!Operand || !OrigLoop->contains(Operand) ||
        (Cost->isUniformAfterVectorization(Operand, State.VF)))
      InputInstance.Lane = 0;
    auto *NewOp = State.get(User.getOperand(op), InputInstance);
    Cloned->setOperand(op, NewOp);
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  State.set(Def, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
    if (II->getIntrinsicID() == Intrinsic::assume)
      AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}
PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(Builder, OldInst);
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(Builder, OldInst);

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  assert(L && "Create Trip Count for null loop.");
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way that we get a backedge taken count is that the
  // induction variable was signed and as such will not overflow. In such a
  // case truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}
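// Illustrative example of the trip count computation above (numbers are not
// from a real run): for a loop `for (i = 0; i < n; ++i)` SCEV reports a
// backedge-taken count of n - 1, so the expanded trip count is
// (n - 1) + 1 = n. If the phi is i32 but the count was computed as i64, the
// count is truncated to i32 first, which is legal per the comment above.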
Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's ok if this addition
  // overflows: the vector induction variable will eventually wrap to zero
  // given that it starts at zero and its Step is a power of two; the loop
  // will then exit, with the last early-exit vector comparison also
  // producing all-true.
  if (Cost->foldTailByMasking()) {
    assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
           "VF*UF must be a power of 2 when folding tail by masking");
    assert(!VF.isScalable() &&
           "Tail folding not yet supported for scalable vectors");
    TC = Builder.CreateAdd(
        TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
  }

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step, otherwise.
  // Step is equal to the vectorization factor (number of SIMD elements)
  // times the unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // There are two cases where we need to ensure (at least) the last
  // iteration runs in the scalar remainder loop. In those cases, if the step
  // evenly divides the trip count, we set the remainder to be equal to the
  // step. If the step does not evenly divide the trip count, no adjustment
  // is necessary since there will already be scalar iterations. Note that
  // the minimum iterations check ensures that N >= Step. The cases are:
  // 1) If there is a non-reversed interleaved group that may speculatively
  //    access memory out-of-bounds.
  // 2) If any instruction may follow a conditionally taken exit. That is, if
  //    the loop contains multiple exiting blocks, or a single exiting block
  //    which is not the latch.
  if (VF.isVector() && Cost->requiresScalarEpilogue()) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}
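// A worked example of the computation above, assuming N = 10 and
// Step = VF * UF = 4 (numbers chosen for illustration only):
//   - default:          n.mod.vf = 10 % 4 = 2, n.vec = 10 - 2 = 8, so the
//                       vector loop runs 2 iterations and 2 iterations are
//                       left for the scalar remainder loop.
//   - tail folded:      n.rnd.up = 10 + 3 = 13, n.mod.vf = 1, n.vec = 12,
//                       i.e. 3 masked vector iterations and no remainder.
//   - scalar epilogue
//     required, N = 8:  n.mod.vf = 0 is bumped to 4, so n.vec = 4, forcing
//                       the last 4 iterations into the scalar loop.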
Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                                   const DataLayout &DL) {
  // Verify that V is a vector type with same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

  // Do a direct cast if element types are castable.
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type.
  // This may happen when V is a floating point vector but DstVTy is a vector
  // of pointers or vice-versa. Handle this using a two-step bitcast using an
  // intermediate Integer type for the bitcast, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}
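// For instance (an illustrative sketch, assuming 64-bit pointers in the data
// layout), casting <4 x double> to <4 x i8*> is not a single legal bitcast,
// so the code above emits the two-step form:
//   %cast = bitcast <4 x double> %v to <4 x i64>
//   %res  = inttoptr <4 x i64> %cast to <4 x i8*>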
void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
                                          : ICmpInst::ICMP_ULT;

  // If tail is to be folded, vector loop takes care of all iterations.
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    Value *Step =
        createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  }
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update dominator for Bypass & LoopExit.
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}
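// The emitted guard looks roughly like this sketch (for VF = 4, UF = 2, an
// i64 trip count and no tail folding; ult becomes ule when a scalar epilogue
// is required):
//   %min.iters.check = icmp ult i64 %count, 8
//   br i1 %min.iters.check, label %scalar.ph, label %vector.ph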
void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  // Reuse existing vector loop preheader for SCEV checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck = Exp.expandCodeForPredicate(
      &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  SCEVCheckBlock->setName("vector.scevcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
                 nullptr, "vector.ph");

  // Update dominator only if this is first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  ReplaceInstWithInst(
      SCEVCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  // VPlan-native path does not do any analysis for runtime checks currently.
  if (EnableVPlanNativePath)
    return;

  // Reuse existing vector loop preheader for runtime memory checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const MemCheckBlock = L->getLoopPreheader();

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few
  // elements faster.
  auto *LAI = Legal->getLAI();
  const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
  if (!RtPtrChecking.Need)
    return;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  MemCheckBlock->setName("vector.memcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  auto *CondBranch = cast<BranchInst>(
      Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader));
  ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch);

  // Update dominator only if this is first RT check. Note that this must be
  // checked before MemCheckBlock is added to LoopBypassBlocks below;
  // otherwise the condition could never be true.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, MemCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
  }

  LoopBypassBlocks.push_back(MemCheckBlock);
  AddedSafetyChecks = true;

  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(),
                   "induction");
  std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks(
      MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp);
  assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
                            "claimed checks are required");
  CondBranch->setCondition(MemRuntimeCheck);

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
}
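// After this runs, the guarded region looks roughly like the sketch below
// (the exact name of the condition produced by addRuntimeChecks may differ):
//   vector.memcheck:
//     ... pointer-overlap comparisons ...
//     br i1 %memcheck.conflict, label %scalar.ph, label %vector.ph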
Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID) const {

  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType() == Step->getType() &&
         "Index type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (=LoopVectorBody), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that
  // the expanded instructions dominate all their uses.
  auto GetInsertPoint = [this, &B]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
      return LoopVectorBody->getTerminator();
    return &*B.GetInsertPoint();
  };
  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        StartValue->getType()->getPointerElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();

    // Floating point operations had to be 'fast' to enable the induction.
    FastMathFlags Flags;
    Flags.setFast();

    Value *MulExp = B.CreateFMul(StepValue, Index);
    if (isa<Instruction>(MulExp))
      // We have to check, the MulExp may be a constant.
      cast<Instruction>(MulExp)->setFastMathFlags(Flags);

    Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                               "induction");
    if (isa<Instruction>(BOp))
      cast<Instruction>(BOp)->setFastMathFlags(Flags);

    return BOp;
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}
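// In other words, for an induction with start value S and step C, the
// transformed value of Index is S + Index * C (S - Index when C == -1, and
// a GEP for pointer inductions). E.g., with S = 7, C = 3 and Index = 4 the
// emitted expression computes 7 + 4 * 3 = 19 (numbers are illustrative
// only).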
Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  LoopExitBlock = OrigLoop->getUniqueExitBlock();
  assert(LoopExitBlock && "Must have an exit block");
  assert(LoopVectorPreHeader && "Invalid loop structure");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  // Set up branch from middle block to the exit and scalar preheader blocks.
  // completeLoopSkeleton will update the condition to use an iteration
  // check, if required to decide whether to execute the remainder.
  BranchInst *BrInst =
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines
  // later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}
void InnerLoopVectorizer::createInductionResumeValues(
    Loop *L, Value *VectorTripCount,
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(VectorTripCount && L && "Expected valid arguments");
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy original phi DL over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(L->getLoopPreheader()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
      const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
      EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
      EndValue->setName("ind.end");

      // Compute the end value for the additional bypass (if applicable).
      if (AdditionalBypass.first) {
        B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
        CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
                                         StepType, true);
        CRD =
            B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
        EndValueFromAdditionalBypass =
            emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
        EndValueFromAdditionalBypass->setName("ind.end");
      }
    }
    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);

    // Fix the scalar body counter (PHI node).
    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);

    if (AdditionalBypass.first)
      BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
                                            EndValueFromAdditionalBypass);

    OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  }
}

BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
                                                      MDNode *OrigLoopID) {
  assert(L && "Expected valid loop.");

  // The trip counts should be cached by now.
  Value *Count = getOrCreateTripCount(L);
  Value *VectorTripCount = getOrCreateVectorTripCount(L);

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  // If tail is to be folded, we know we don't need to run the remainder.
  if (!Cost->foldTailByMasking()) {
    Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                        Count, VectorTripCount, "cmp.n",
                                        LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch terminator
    // instead of the corresponding compare because they may have ended up
    // with different line numbers and we want to avoid awkward line stepping
    // while debugging. E.g., if the compare has a line number inside the
    // loop.
    CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
    cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
  }

  // Get ready to start creating new instructions into the vectorized body.
  assert(LoopVectorPreHeader == L->getLoopPreheader() &&
         "Inconsistent vector loop preheader");
  Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});
  if (VectorizedLoopID.hasValue()) {
    L->setLoopID(VectorizedLoopID.getValue());

    // Do not setAlreadyVectorized if loop attributes have been defined
    // explicitly.
    return LoopVectorPreHeader;
  }

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    L->setLoopID(LID);

  LoopVectorizeHints Hints(L, true, *ORE);
  Hints.setAlreadyVectorized();

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  LI->verify(*DT);
#endif

  return LoopVectorPreHeader;
}
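// The middle-block check wired up above ends up looking like this sketch
// (for an i64 count without tail folding; the true edge skips the scalar
// remainder):
//   middle.block:
//     %cmp.n = icmp eq i64 %count, %n.vec
//     br i1 %cmp.n, label %exit.block, label %scalar.ph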
BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |     v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */

  // Get the metadata of the original loop before it gets modified.
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  // Create an empty vector loop, and prepare basic blocks for the runtime
  // checks.
  Loop *Lp = createVectorLoopSkeleton("");

  // Now, compare the new count to zero. If it is zero skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  // The loop step is equal to the vectorization factor (num of SIMD
  // elements) times the unroll factor (num of SIMD instructions).
  Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
  Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues(Lp, CountRoundDown);

  return completeLoopSkeleton(Lp, OrigLoopID);
}
// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each-other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}
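// As a concrete (made-up) example: for `for (i = 0; i < n; ++i)` with
// Start = 0 and Step = 1, and a vector trip count CRD, an external user of
// the post-increment value sees CRD, while an external user of the phi
// itself sees the escape value 0 + 1 * (CRD - 1) = CRD - 1.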
namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

InstructionCost
LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
                                              bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from there,
  // execute VF scalar calls, and then gather the result into the vector
  // return value.
  InstructionCost ScalarCallCost =
      TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute corresponding vector type for return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute costs of unpacking argument values for the scalar calls and
  // packing the return values to a vector.
  InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);

  InstructionCost Cost =
      ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently
  // found cost is the cost we need to return.
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  InstructionCost VectorCallCost =
      TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    Cost = VectorCallCost;
  }
  return Cost;
}
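// For illustration, with made-up numbers: if a scalar call costs 10, VF = 4
// and the scalarization overhead (the extracts and inserts) is 12, the
// scalarized estimate is 10 * 4 + 12 = 52. If a vector variant of the callee
// exists and costs, say, 20, NeedToScalarize is cleared and 20 is returned
// instead.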
static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
  if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
    return Elt;
  return VectorType::get(Elt, VF);
}

InstructionCost
LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                   ElementCount VF) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");
  Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
  FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  SmallVector<Type *> ParamTys;
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

  IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
                                    dyn_cast<IntrinsicInst>(CI));
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}
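// The function below shrinks vector operations whose demanded bits fit a
// narrower type. As a sketch, if the cost model proves an i32 add only needs
// 8 bits, a pattern like
//   %a = add <4 x i32> %x, %y
// becomes
//   %x.tr = trunc <4 x i32> %x to <4 x i8>
//   %y.tr = trunc <4 x i32> %y to <4 x i8>
//   %a.tr = add <4 x i8> %x.tr, %y.tr
//   %a    = zext <4 x i8> %a.tr to <4 x i32>
// with the ext/trunc pairs expected to be cleaned up by InstCombine.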
void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    VPValue *Def = State.Plan->getVPValue(KV.first);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      auto *TruncatedTy = FixedVectorType::get(
          ScalarTruncatedTy,
          cast<FixedVectorType>(OriginalTy)->getNumElements());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));

        // Any wrapping introduced by shrinking this operation shouldn't be
        // considered undefined behavior. So, we can't unconditionally copy
        // arithmetic wrapping flags to NewI.
        cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
                             ->getNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0),
            FixedVectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
                             ->getNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1),
            FixedVectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
      } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
                            ->getNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0),
            FixedVectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
                            ->getNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0),
            FixedVectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        // If we don't know what to do, be conservative and don't do anything.
        continue;
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      State.reset(Def, Res, Part);
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    VPValue *Def = State.Plan->getVPValue(KV.first);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF.isVector())
    truncateToMinimalBitwidths(State);

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs(State);
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs(State);

  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Fix-up external users of the induction variables.
  for (auto &Entry : Legal->getInductionVars())
    fixupIVUsers(Entry.first, Entry.second,
                 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
                 IVEndValues[Entry.first], LoopMiddleBlock);

  fixLCSSAPHIs(State);
  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(LoopVectorBody);

  // Set/update profile weights for the vector and remainder loops as
  // original loop iterations are now distributed among them. Note that the
  // original loop, represented by LoopScalarBody, becomes the remainder loop
  // after vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result but that should be OK since
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of vector code caused by legality checks is ignored, assigning
  // all the weight to the vector loop, optimistically.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(
      LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
      LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
}
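// For example (made-up numbers): if the original loop's profile says 1000
// iterations and VF * UF = 8, the vector loop is assigned roughly
// 1000 / 8 = 125 iterations and the scalar remainder loop the leftovers, in
// line with setProfileInfoAfterUnrolling's unroll-factor semantics.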
void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi
  // nodes. Phi nodes have cycles, so we need to vectorize them in two
  // stages. This is stage #2: We now need to fix the recurrences by adding
  // incoming edges to the currently empty PHI nodes. At this point every
  // instruction in the original loop is widened to a vector form so we can
  // use them to construct the incoming edges.
  for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
    // Handle first-order recurrences and reductions that need to be fixed.
    if (Legal->isFirstOrderRecurrence(&Phi))
      fixFirstOrderRecurrence(&Phi, State);
    else if (Legal->isReductionVariable(&Phi))
      fixReduction(&Phi, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
                                                  VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce
  // the shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After execution completes the vector loop, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF.isVector()) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    VectorInit = Builder.CreateInsertElement(
        PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
        VectorInit, Builder.getInt32(VF.getKnownMinValue() - 1),
        "vector.recur.init");
  }

  VPValue *PhiDef = State.Plan->getVPValue(Phi);
  VPValue *PreviousDef = State.Plan->getVPValue(Previous);
  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));

  // Create a phi node for the new recurrence. The current value will either
  // be the initial value inserted into a vector or loop-varying vector
  // value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value of the last part UF - 1. It appears
  // last among all unrolled iterations, due to the order of their
  // construction.
  Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
  // Find and set the insertion point after the previous value if it is an
  // instruction.
  BasicBlock::iterator InsertPt;
  // Note that the previous value may have been constant-folded so it is not
  // guaranteed to be an instruction in the vector loop.
  // FIXME: Loop invariant values do not form recurrences. We should deal
  // with them earlier.
  if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
    InsertPt = LoopVectorBody->getFirstInsertionPt();
  else {
    Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
    if (isa<PHINode>(PreviousLastPart))
      // If the previous value is a phi node, we should insert after all the
      // phi nodes in the block containing the PHI to avoid breaking basic
      // block verification. Note that the basic block may be different to
      // LoopVectorBody, in case we predicate the loop.
      InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
    else
      InsertPt = ++PreviousInst->getIterator();
  }
  Builder.SetInsertPoint(&*InsertPt);

  // We will construct a vector for the recurrence by combining the values
  // for the current and previous iterations. This is the required shuffle
  // mask.
  assert(!VF.isScalable());
  SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
  ShuffleMask[0] = VF.getKnownMinValue() - 1;
  for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
    ShuffleMask[I] = I + VF.getKnownMinValue() - 1;

  // The vector from which to take the initial value for the current
  // iteration (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *PreviousPart = State.get(PreviousDef, Part);
    Value *PhiPart = State.get(PhiDef, Part);
    auto *Shuffle =
        VF.isVector()
            ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
            : Incoming;
    PhiPart->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiPart)->eraseFromParent();
    State.reset(PhiDef, Shuffle, Part);
    Incoming = PreviousPart;
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming,
                      LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *ExtractForScalar = Incoming;
  if (VF.isVector()) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    ExtractForScalar = Builder.CreateExtractElement(
        ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
        "vector.recur.extract");
  }
  // Extract the second last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration).
  // This will be the value when jumping to the exit block from the
  // LoopMiddleBlock, when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector())
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
        "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second last element when VF > 1.
  else if (UF > 1)
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will
  // need either the last value of the scalar recurrence or the last value
  // of the vector recurrence we extracted in the middle block. Since the
  // loop is in LCSSA form, we just need to find all the phi nodes for the
  // original scalar recurrence in the exit block, and then add an edge for
  // the middle block. Note that LCSSA does not imply single entry when the
  // original scalar loop had multiple exiting edges (as we always run the
  // last iteration in the scalar epilogue); in that case, the exiting path
  // through middle will be dynamically dead and the value picked for the
  // phi doesn't matter.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis())
    if (any_of(LCSSAPhi.incoming_values(),
               [Phi](Value *V) { return V == Phi; }))
      LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}
void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(Builder, ReductionStartValue);
  bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);

  VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
    Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
    cast<PHINode>(VecRdxPhi)
        ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  }

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(Builder, LoopExitInst);

  // If the tail is folded by masking, the vector value to leave the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, instead of the former. For an in-loop reduction the
  // reduction will already be predicated, and does not need to be handled
  // here.
  if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at no
      // extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
      RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
      if (PreferPredicatedReductionSelect ||
          TTI->preferPredicatedReductionSelect(
              RdxDesc.getOpcode(), Phi->getType(),
              TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi =
            cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part));
        VecRdxPhi->setIncomingValueForBlock(
            LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
      }
    }
  }

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
    assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(
        LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = State.get(LoopExitInstDef, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                        : Builder.CreateZExt(Trunc, VecTy);
      for (Value::user_iterator UI = RdxParts[Part]->user_begin();
           UI != RdxParts[Part]->user_end();)
        if (*UI != Trunc) {
          (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        } else {
          ++UI;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      State.reset(LoopExitInstDef, RdxParts[Part], Part);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
  unsigned Op = RecurrenceDescriptor::getOpcode(RK);

  // The middle block terminator has already been assigned a DebugLoc here (the
  // OrigLoop's single latch terminator). We want the whole middle block to
  // appear to execute on this line because: (a) it is all compiler generated,
  // (b) these instructions are always executed after evaluating the latch
  // conditional branch, and (c) other passes may add new predecessors which
  // terminate on this line. This is the easiest way to ensure we don't
  // accidentally cause an extra step back into the loop while debugging.
  setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
  {
    // Floating-point operations should have some FMF to enable the reduction.
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
    for (unsigned Part = 1; Part < UF; ++Part) {
      Value *RdxPart = State.get(LoopExitInstDef, Part);
      if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
        ReducedPartRdx = Builder.CreateBinOp(
            (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
      } else {
        ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
      }
    }
  }

  // Create the reduction after the loop. Note that in-loop reductions create
  // the target reduction inside the loop using a Reduction recipe.
  if (VF.isVector() && !IsInLoopReductionPhi) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (Phi->getType() != RdxDesc.getRecurrenceType())
      ReducedPartRdx =
          RdxDesc.isSigned()
              ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
              : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
  }

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());
  for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
    BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See the comment on the analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
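  // For example (illustrative value names): an exit-block phi
  //   %res = phi float [ %sum.next, %loop ]
  // becomes
  //   %res = phi float [ %sum.next, %loop ], [ %rdx, %middle.block ]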
  for (PHINode &LCSSAPhi : LoopExitBlock->phis())
    if (any_of(LCSSAPhi.incoming_values(),
               [LoopExitInst](Value *V) { return V == LoopExitInst; }))
      LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                                                  VPTransformState &State) {
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RK != RecurKind::Add && RK != RecurKind::Mul)
    return;

  Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
  assert(LoopExitInstr && "null loop exit instruction");
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(LoopExitInstr);
  Visited.insert(LoopExitInstr);

  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();
    if (isa<OverflowingBinaryOperator>(Cur))
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = State.get(State.Plan->getVPValue(Cur), Part);
        cast<Instruction>(V)->dropPoisonGeneratingFlags();
      }

    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);
      if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
          Visited.insert(UI).second)
        Worklist.push_back(UI);
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
      // Some phis were already hand updated by the reduction and recurrence
      // code above; leave them alone.
      continue;

    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // Non-instruction incoming values will have only one value.
    unsigned LastLane = 0;
    if (isa<Instruction>(IncomingValue))
      LastLane = Cost->isUniformAfterVectorization(
                     cast<Instruction>(IncomingValue), VF)
                     ? 0
                     : VF.getKnownMinValue() - 1;
    assert((!VF.isScalable() || LastLane == 0) &&
           "scalable vectors don't support non-uniform scalars yet");
    // Can be a loop invariant incoming value or the last scalar value to be
    // extracted from the vectorized loop.
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Value *lastIncomingValue =
        OrigLoop->isLoopInvariant(IncomingValue)
            ? IncomingValue
            : State.get(State.Plan->getVPValue(IncomingValue),
                        VPIteration(UF - 1, LastLane));
    LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know whether we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // over the worklist fails to sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    VPWidenPHIRecipe *VPPhi =
        cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
    PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
    // Make sure the builder has a valid insert point.
    Builder.SetInsertPoint(NewPhi);
    for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
      VPValue *Inc = VPPhi->getIncomingValue(i);
      VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
      NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
    }
  }
}

void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
                                   VPUser &Operands, unsigned UF,
                                   ElementCount VF, bool IsPtrLoopInvariant,
                                   SmallBitVector &IsIndexLoopInvariant,
                                   VPTransformState &State) {
  // Construct a vector GEP by widening the operands of the scalar GEP as
  // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
  // results in a vector of pointers when at least one operand of the GEP
  // is vector-typed. Thus, to keep the representation compact, we only use
  // vector-typed operands for loop-varying values.
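  // For example (illustrative IR): widening
  //   %gep = getelementptr inbounds float, float* %a, i64 %iv
  // with VF = 4 keeps the loop-invariant base %a scalar and only widens the
  // index to <4 x i64>, producing a single <4 x float*> vector of pointers.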

  if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
    // If we are vectorizing, but the GEP has only loop-invariant operands,
    // the GEP we build (by only using vector-typed operands for
    // loop-varying values) would be a scalar pointer. Thus, to ensure we
    // produce a vector of pointers, we need to either arbitrarily pick an
    // operand to broadcast, or broadcast a clone of the original GEP.
    // Here, we broadcast a clone of the original.
    //
    // TODO: If at some point we decide to scalarize instructions having
    //       loop-invariant operands, this special case will no longer be
    //       required. We would add the scalarization decision to
    //       collectLoopScalars() and teach getVectorValue() to broadcast
    //       the lane-zero scalar value.
    auto *Clone = Builder.Insert(GEP->clone());
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
      State.set(VPDef, EntryPart, Part);
      addMetadata(EntryPart, GEP);
    }
  } else {
    // If the GEP has at least one loop-varying operand, we are sure to
    // produce a vector of pointers. But if we are only unrolling, we want
    // to produce a scalar GEP for each unroll part. Thus, the GEP we
    // produce with the code below will be scalar (if VF == 1) or vector
    // (otherwise). Note that for the unroll-only case, we still maintain
    // values in the vector mapping with initVector, as we do for other
    // instructions.
    for (unsigned Part = 0; Part < UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? State.get(Operands.getOperand(0), VPIteration(0, 0))
                      : State.get(Operands.getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
        VPValue *Operand = Operands.getOperand(I);
        if (IsIndexLoopInvariant[I - 1])
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector, otherwise.
      auto *NewGEP =
          GEP->isInBounds()
              ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
                                          Indices)
              : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
      assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(VPDef, NewGEP, Part);
      addMetadata(NewGEP, GEP);
    }
  }
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
                                              RecurrenceDescriptor *RdxDesc,
                                              VPValue *StartVPV, VPValue *Def,
                                              VPTransformState &State) {
  PHINode *P = cast<PHINode>(PN);
  if (EnableVPlanNativePath) {
    // Currently we enter here in the VPlan-native path for non-induction
    // PHIs where all control flow is uniform. We simply widen these PHIs.
    // Create a vector phi with no operands - the vector phi operands will be
    // set at the end of vector code generation.
    Type *VecTy = (State.VF.isScalar())
                      ? PN->getType()
                      : VectorType::get(PN->getType(), State.VF);
    Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
    State.set(Def, VecPhi, 0);
    OrigPHIsToFix.push_back(P);

    return;
  }

  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
    Value *Iden = nullptr;
    bool ScalarPHI =
        (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
    Type *VecTy =
        ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);

    if (RdxDesc) {
      assert(Legal->isReductionVariable(P) && StartV &&
             "RdxDesc should only be set for reduction variables; in that case "
             "a StartV is also required");
      RecurKind RK = RdxDesc->getRecurrenceKind();
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
        if (ScalarPHI) {
          Iden = StartV;
        } else {
          IRBuilderBase::InsertPointGuard IPBuilder(Builder);
          Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
          StartV = Iden =
              Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
        }
      } else {
        Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
            RK, VecTy->getScalarType());
        Iden = IdenC;

        if (!ScalarPHI) {
          Iden = ConstantVector::getSplat(State.VF, IdenC);
          IRBuilderBase::InsertPointGuard IPBuilder(Builder);
          Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
          Constant *Zero = Builder.getInt32(0);
          StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
        }
      }
    }

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      State.set(Def, EntryPart, Part);
      if (StartV) {
        // Make sure to add the reduction start value only to the
        // first unroll part.
        Value *StartVal = (Part == 0) ? StartV : Iden;
        cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
      }
    }
    return;
  }

  assert(!Legal->isReductionVariable(P) &&
         "reductions should be handled above");

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars().count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars().lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  // flags, which can be found from the original scalar operations.
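  // Note: only pointer inductions reach the switch below; e.g.
  // 'char *p = base; for (...) *p++ = 0;' yields an IK_PtrInduction phi.
  // Integer and FP inductions are widened elsewhere, as the unreachable
  // cases document.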
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");

    if (Cost->isScalarAfterVectorization(P, State.VF)) {
      // This is the normalized GEP that starts counting at zero.
      Value *PtrInd =
          Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
      // Determine the number of scalars we need to generate for each unroll
      // iteration. If the instruction is uniform, we only need to generate the
      // first lane. Otherwise, we generate all VF values.
      unsigned Lanes = Cost->isUniformAfterVectorization(P, State.VF)
                           ? 1
                           : State.VF.getKnownMinValue();
      for (unsigned Part = 0; Part < UF; ++Part) {
        for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
          Constant *Idx = ConstantInt::get(
              PtrInd->getType(), Lane + Part * State.VF.getKnownMinValue());
          Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
          Value *SclrGep =
              emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
          SclrGep->setName("next.gep");
          State.set(Def, SclrGep, VPIteration(Part, Lane));
        }
      }
      return;
    }
    assert(isa<SCEVConstant>(II.getStep()) &&
           "Induction step not a SCEV constant!");
    Type *PhiType = II.getStep()->getType();

    // Build a pointer phi.
    Value *ScalarStartValue = II.getStartValue();
    Type *ScStValueType = ScalarStartValue->getType();
    PHINode *NewPointerPhi =
        PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
    NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);

    // A pointer induction, performed by using a GEP.
    BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
    Instruction *InductionLoc = LoopLatch->getTerminator();
    const SCEV *ScalarStep = II.getStep();
    SCEVExpander Exp(*PSE.getSE(), DL, "induction");
    Value *ScalarStepValue =
        Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
    Value *InductionGEP = GetElementPtrInst::Create(
        ScStValueType->getPointerElementType(), NewPointerPhi,
        Builder.CreateMul(
            ScalarStepValue,
            ConstantInt::get(PhiType, State.VF.getKnownMinValue() * State.UF)),
        "ptr.ind", InductionLoc);
    NewPointerPhi->addIncoming(InductionGEP, LoopLatch);

    // Create UF many actual address GEPs that use the pointer
    // phi as base and a vectorized version of the step value
    // (<step*0, ..., step*N>) as offset.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      SmallVector<Constant *, 8> Indices;
      // Create a vector of consecutive numbers from zero to VF.
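      // E.g. with VF = 4 and UF = 2: part 0 gets <0, 1, 2, 3> and part 1
      // gets <4, 5, 6, 7>; each is then scaled by the splatted step below.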
      for (unsigned i = 0; i < State.VF.getKnownMinValue(); ++i)
        Indices.push_back(
            ConstantInt::get(PhiType, i + Part * State.VF.getKnownMinValue()));
      Constant *StartOffset = ConstantVector::get(Indices);

      Value *GEP = Builder.CreateGEP(
          ScStValueType->getPointerElementType(), NewPointerPhi,
          Builder.CreateMul(StartOffset,
                            Builder.CreateVectorSplat(
                                State.VF.getKnownMinValue(), ScalarStepValue),
                            "vector.gep"));
      State.set(Def, GEP, Part);
    }
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
                                           VPUser &User,
                                           VPTransformState &State) {
  switch (I.getOpcode()) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unops and binops.
    setDebugLocFromInst(Builder, &I);

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : User.operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V))
        VecOp->copyIRFlags(&I);

      // Use this vector value for all users of the original instruction.
      State.set(Def, V, Part);
      addMetadata(V, &I);
    }

    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *B = State.get(User.getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      State.set(Def, C, Part);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    // Vectorize casts.
    Type *DestTy =
        (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      State.set(Def, Cast, Part);
      addMetadata(Cast, &I);
    }
    break;
  }
  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
                                               VPUser &ArgOperands,
                                               VPTransformState &State) {
  assert(!isa<DbgInfoIntrinsic>(I) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  setDebugLocFromInst(Builder, &I);

  Module *M = I.getParent()->getParent()->getParent();
  auto *CI = cast<CallInst>(&I);

  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

  // The flag shows whether we use an intrinsic or a usual call for the
  // vectorized version of the instruction.
  // Is it beneficial to perform the intrinsic call compared to the lib call?
  bool NeedToScalarize = false;
  InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
  bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
  assert((UseVectorIntrinsic || !NeedToScalarize) &&
         "Instruction should be scalarized elsewhere.");
  assert(IntrinsicCost.isValid() && CallCost.isValid() &&
         "Cannot have invalid costs while widening");

  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Value *, 4> Args;
    for (auto &I : enumerate(ArgOperands.operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
        Arg = State.get(I.value(), Part);
      else
        Arg = State.get(I.value(), VPIteration(0, 0));
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseVectorIntrinsic) {
      // Use vector version of the intrinsic.
      Type *TysForDecl[] = {CI->getType()};
      if (VF.isVector())
        TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
      VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
      // Use vector version of the function call.
      const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
#ifndef NDEBUG
      assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
             "Can't create vector function.");
#endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
}

void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
                                                 VPUser &Operands,
                                                 bool InvariantCond,
                                                 VPTransformState &State) {
  setDebugLocFromInst(Builder, &I);

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // Instcombine will make this a no-op.
  auto *InvarCond = InvariantCond
                        ? State.get(Operands.getOperand(0), VPIteration(0, 0))
                        : nullptr;

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *Cond =
        InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
    Value *Op0 = State.get(Operands.getOperand(1), Part);
    Value *Op1 = State.get(Operands.getOperand(2), Part);
    Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
    State.set(VPDef, Sel, Part);
    addMetadata(Sel, &I);
  }
}

void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
  auto *Latch = TheLoop->getLoopLatch();

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of
  // a store will remain scalar if the store is scalarized.
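  // For instance, a consecutive 'store <4 x float> %val, <4 x float>* %ptr'
  // needs only a scalar base pointer, whereas a scatter consumes a vector of
  // pointers, so its address computation cannot remain scalar.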
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
    if (!isa<PHINode>(Ptr) ||
        !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
      return false;
    auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
    if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
      return false;
    return isScalarUse(MemAccess, Ptr);
  };

  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is actually the pointer induction of a loop, it is inserted into
  // the worklist. If the use will be a scalar use, and the pointer is only
  // used by memory accesses, we place the pointer in ScalarPtrs. Otherwise,
  // the pointer is placed in PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    if (isScalarPtrInduction(MemAccess, Ptr)) {
      Worklist.insert(cast<Instruction>(Ptr));
      Instruction *Update = cast<Instruction>(
          cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
      Worklist.insert(Update);
      LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
                        << "\n");
      LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
                        << "\n");
      return;
    }
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used
  // by memory accesses requiring a scalar use. The pointer operands of loads
  // and stores will be scalar as long as the memory access is not a gather
  // or scatter operation. The value operand of a store will remain scalar if
  // the store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If tail-folding is applied, the primary induction variable will be used
    // to feed a vector compare.
    if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
      continue;

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
                                                         ElementCount VF) {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getMemInstValueType(I);
    // We have already decided how to vectorize this instruction, get that
    // result.
    if (VF.isVector()) {
      InstWidening WideningDecision = getWideningDecision(I, VF);
      assert(WideningDecision != CM_Unknown &&
             "Widening decision should be ready at this moment");
      return WideningDecision == CM_Scalarize;
    }
    const Align Alignment = getLoadStoreAlignment(I);
    return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
                                isLegalMaskedGather(Ty, Alignment))
                            : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
                                isLegalMaskedScatter(Ty, Alignment));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
    Instruction *I, ElementCount VF) {
  assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  assert(getWideningDecision(I, VF) == CM_Unknown &&
         "Decision should not be set yet.");
  auto *Group = getInterleavedAccessGroup(I);
  assert(Group && "Must have a group.");

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = getMemInstValueType(I);
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  // Check if masking is required.
  // A group may need masking for one of two reasons: it resides in a block
  // that needs predication, or it was decided to use masking to deal with
  // gaps.
  bool PredicatedAccessRequiresMasking =
      Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
  bool AccessWithGapsRequiresMasking =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
    return true;

  // If masked interleaving is required, we expect that the user/target had
  // enabled it, because otherwise it either wouldn't have been created or
  // it should have been invalidated by the CostModel.
  assert(useMaskedInterleavedAccesses(TTI) &&
         "Masked interleave-groups for predicated accesses are not enabled.");

  auto *Ty = getMemInstValueType(I);
  const Align Alignment = getLoadStoreAlignment(I);
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
                          : TTI.isLegalMaskedStore(Ty, Alignment);
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
    Instruction *I, ElementCount VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!Legal->isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. If we find no uniform value, we won't
  // analyze it again; Uniforms.count(VF) will return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Instructions that are scalar with predication must not be considered
  // uniform after vectorization, because that would create an erroneous
  // replicating region where only a single instance out of VF should be
  // formed.
  // TODO: optimize such seldom cases if found important, see PR40816.
  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isOutOfScope(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
                        << *I << "\n");
      return;
    }
    if (isScalarWithPredication(I, VF)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
                        << *I << "\n");
      return;
    }
    LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
    Worklist.insert(I);
  };

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    // A uniform memory op is itself uniform. We exclude uniform stores
    // here as they demand the last lane, not the first one.
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
  SmallPtrSet<Value *, 8> HasUniformUse;

  // Scan the loop for instructions which are either a) known to have only
  // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be already inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction
    // are uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

Optional<ElementCount>
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since it's still likely to be
    // dynamically uniform if the target can skip.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return None;

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to a vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return None;
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
  assert(!MaxVF.isScalable() &&
         "Scalable vectors do not yet support tail folding");
  assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
         "MaxVF must be a power of 2");
  unsigned MaxVFtimesIC =
      UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
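  // For example, a known trip count of 64 with MaxVF = 8 and UserIC = 2
  // gives 64 % (8 * 2) == 0: no scalar tail remains and no folding is
  // needed.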
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
  const SCEV *Rem = SE->getURemExpr(
      SE->applyLoopGuards(ExitCount, TheLoop),
      SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
  if (Rem->isZero()) {
    // Accept MaxVF if we do not have a tail.
    LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
    return MaxVF;
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxVF;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to a vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxVF;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return None;
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return None;
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return None;
}

ElementCount
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
                                                 ElementCount UserVF) {
  bool IgnoreScalableUserVF = UserVF.isScalable() &&
                              !TTI.supportsScalableVectors() &&
                              !ForceTargetSupportsScalableVectors;
  if (IgnoreScalableUserVF) {
    LLVM_DEBUG(
        dbgs() << "LV: Ignoring VF=" << UserVF
               << " because target does not support scalable vectors.\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "Ignoring VF=" << ore::NV("UserVF", UserVF)
             << " because target does not support scalable vectors.";
    });
  }

  // Beyond this point two scenarios are handled. If UserVF isn't specified
  // then a suitable VF is chosen. If UserVF is specified and there are
  // dependencies, check if it's legal. However, if a UserVF is specified and
  // there are no dependencies, then there's nothing to do.
  if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
    if (!canVectorizeReductions(UserVF)) {
      reportVectorizationFailure(
          "LV: Scalable vectorization not supported for the reduction "
          "operations found in this loop. Using fixed-width "
          "vectorization instead.",
          "Scalable vectorization not supported for the reduction operations "
          "found in this loop. Using fixed-width vectorization instead.",
          "ScalableVFUnfeasible", ORE, TheLoop);
      return computeFeasibleMaxVF(
          ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
    }

    if (Legal->isSafeForAnyVectorWidth())
      return UserVF;
  }

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();

  // If the user vectorization factor is legally unsafe, clamp it to a safe
  // value. Otherwise, return as is.
  if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
    unsigned MaxSafeElements =
        PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
    ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);

    if (UserVF.isScalable()) {
      Optional<unsigned> MaxVScale = TTI.getMaxVScale();

      // Scale VF by vscale before checking if it's safe.
      MaxSafeVF = ElementCount::getScalable(
          MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);

      if (MaxSafeVF.isZero()) {
        // The dependence distance is too small to use scalable vectors; fall
        // back on fixed-width vectors.
        LLVM_DEBUG(
            dbgs()
            << "LV: Max legal vector width too small, scalable vectorization "
               "unfeasible. Using fixed-width vectorization instead.\n");
        ORE->emit([&]() {
          return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
                                            TheLoop->getStartLoc(),
                                            TheLoop->getHeader())
                 << "Max legal vector width too small, scalable vectorization "
                 << "unfeasible. Using fixed-width vectorization instead.";
        });
        return computeFeasibleMaxVF(
            ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
      }
    }

    LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");

    if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
      return UserVF;

    LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                      << " is unsafe, clamping to max safe VF=" << MaxSafeVF
                      << ".\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe, clamping to maximum safe vectorization factor "
             << ore::NV("VectorizationFactor", MaxSafeVF);
    });
    return MaxSafeVF;
  }

  WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
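  // For example (illustrative numbers only): with 256-bit vector registers
  // and a widest scalar type of i32, the computation below yields
  // PowerOf2Floor(256 / 32) = 8 elements per vector.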
  auto MaxVectorSize =
      ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType));

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << WidestRegister << " bits.\n");

  assert(MaxVectorSize.getFixedValue() <= WidestRegister &&
         "Did not expect to pack so many elements"
         " into one vector!");
  if (MaxVectorSize.getFixedValue() == 0) {
    LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    return ElementCount::getFixed(1);
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to the constant trip count. There is no point
    // in choosing a higher viable VF as done in the loop below.
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                      << ConstTripCount << "\n");
    return ElementCount::getFixed(ConstTripCount);
  }

  ElementCount MaxVF = MaxVectorSize;
  if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<ElementCount, 8> VFs;
    auto MaxVectorSizeMaxBW =
        ElementCount::getFixed(WidestRegister / SmallestType);
    for (ElementCount VS = MaxVectorSize * 2;
         ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than
    // existing ones.
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (ElementCount MinVF =
            TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) {
      if (ElementCount::isKnownLT(MaxVF, MinVF)) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  assert(!MaxVF.isScalable() && "scalable vectors not yet supported");

  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");

  auto Width = ElementCount::getFixed(1);
  const float ScalarCost = *ExpectedCost.getValue();
  float Cost = ScalarCost;

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && MaxVF.isVector()) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    Cost = std::numeric_limits<float>::max();
  }

  for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
       i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(i);
    assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
    float VectorCost = *C.first.getValue() / (float)i.getFixedValue();
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
    if (VectorCost < ScalarCost) {
      ProfitableVFs.push_back(VectorizationFactor({i, (unsigned)VectorCost}));
    }

    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }
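
  // For example (illustrative costs only): if the scalar loop costs 8 and the
  // VF=4 loop costs 20, the per-lane cost computed above is 20 / 4 = 5, so
  // VF=4 is recorded as profitable and becomes the current best width unless
  // a wider VF achieves an even lower per-lane cost.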

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    Width = ElementCount::getFixed(1);
    Cost = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && !Width.isScalar() &&
                 Cost >= ScalarCost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {Width,
                                (unsigned)(Width.getKnownMinValue() * Cost)};
  return Factor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
        return Legal->isFirstOrderRecurrence(&Phi) ||
               Legal->isReductionVariable(&Phi);
      }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of the penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  if (MainLoopVF.isScalable()) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
                         "yet supported.\n");
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    if (LVP.hasPlanWithVFs(
            {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
      return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  if (!isEpilogueVectorizationProfitable(MainLoopVF))
    return Result;

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
        (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
        LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
        if (PreferInLoopReductions ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store
      //        will be vectorized. We only know this for certain after a VF
      //        has been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we
  // can't predict at this level. For example, frontend pressure (on decode
  // or fetch) due to code size, or the number and capabilities of the
  // execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to
  //    memory due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // Do not interleave if the loop has a finite maximum safe dependence
  // distance; that distance was already used to limit the VF.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want a power-of-two interleave count to simplify
  // any addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when OptForSize, in which case IC is set
  // to 1 above.
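  // For example (illustrative numbers only): with 32 registers in a class,
  // 2 of them tied up by loop-invariant values and a maximum local usage of
  // 6 registers, the formula below gives
  // PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4 interleaved instances.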
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC =
          PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                        std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the
  // VF) then we calculate the cost of VF here.
  if (LoopCost == 0) {
    assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
    LoopCost = *expectedCost(VF).first.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead
  // and potentially expose ILP opportunities.
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2, so
    // the critical path only gets increased by one reduction operation.
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with
  // by this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
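  // For example (illustrative only): in a block containing
  //   %a = load i32, i32* %p
  //   %b = load i32, i32* %q
  //   %c = add i32 %a, %b
  // the intervals of %a and %b are both still open when %c is visited
  // (assuming %c is itself used later in the loop), so at most two in-loop
  // values are live at once and the estimate for this register class is two
  // registers, scaled by the per-value usage for the given VF.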
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instructions that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0U;
    return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model.
  // Masked Load/Gather emulation was previously never allowed.
  // A limited number of Masked Store/Scatter emulations were allowed.
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop
  // and determine if it would be better to not if-convert the blocks they
  // are in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know
    // will already be scalar to avoid traversing chains that are unlikely to
    // be beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting
    // code for. This behavior can be changed by allowing getScalarValue to
    // clone the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider expressions
  // that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.find(I) != ScalarCosts.end())
      continue;

    // Compute the cost of the vector instruction. Note that this cost already
    // includes the scalarization overhead of the predicated instruction.
    InstructionCost VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost
    // of the instruction as if it wasn't if-converted and instead remained in
    // the predicated block. We will scale this cost by block probability
    // after computing the scalarization overhead.
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    InstructionCost ScalarCost =
        VF.getKnownMinValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      ScalarCost +=
          VF.getKnownMinValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand
    // can be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(ElementCount VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always
    // execute the predicated block, if it is an if-else block. Thus, scale
    // the block's cost by the probability of executing it.
    // blockNeedsPredication from Legal is used so as to not include all
    // blocks in tail folded loops.
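    // For example (illustrative, relying on getReciprocalPredBlockProb()
    // returning its assumed reciprocal probability of 2): a predicated block
    // with a raw cost of 10 contributes 10 / 2 = 5 to the scalar loop cost,
    // reflecting that it only executes on about half of the iterations.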
    if (VF.isScalar() && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets Address Access SCEV after verifying that the access pattern
/// is loop invariant except the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr, LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE, const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a gep with all loop invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return PSE.getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

InstructionCost
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                        ElementCount VF) {
  assert(VF.isVector() &&
         "Scalarization cost of instruction implies vectorization.");
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  Type *ValTy = getMemInstValueType(I);
  auto SE = PSE.getSE();

  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

  // Figure out whether the access is strided and get the stride value, if
  // it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  InstructionCost Cost =
      VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  const Align Alignment = getLoadStoreAlignment(I);
  Cost += VF.getKnownMinValue() *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS, TTI::TCK_RecipThroughput);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated store, it may not be executed for each vector
  // lane. Scale the cost by the probability of executing the predicated
  // block.
  if (isPredicatedInst(I)) {
    Cost /= getReciprocalPredBlockProb();

    if (useEmulatedMaskMemRefHack(I))
      // Artificially setting to a high enough value to practically disable
      // vectorization with such operations.
      Cost = 3000000;
  }

  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                    ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  Value *Ptr = getLoadStorePointerOperand(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  const Align Alignment = getLoadStoreAlignment(I);
  InstructionCost Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                      CostKind);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                CostKind, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                ElementCount VF) {
  assert(Legal->isUniformMemOp(*I));

  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
                               CostKind) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
                             CostKind) +
         (isLoopInvariantStoreValue
              ? 0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  }
  return Cost;
}

InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit for no in-loop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return InstructionCost::getInvalid();
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for a pattern of, and finding the minimal acceptable cost:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we
  // find the pattern of mul/ext and test the cost of the entire pattern vs
  // the cost of the components. If the reduction cost is lower then we return
  // it for the reduction instruction and 0 for the other instructions in the
  // pattern. If it is not, we return an invalid cost specifying that the
  // original cost method should be used.
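  // For example (illustrative IR, not from this file): for an in-loop
  // reduction such as
  //   %a.ext = sext <16 x i8> %a to <16 x i32>
  //   %b.ext = sext <16 x i8> %b to <16 x i32>
  //   %mul = mul <16 x i32> %a.ext, %b.ext
  //   %sum = add <16 x i32> %mul, %phi
  // matching reduce(mul(ext(A), ext(B))) lets a target that implements
  // getExtendedAddReductionCost report one combined multiply-accumulate
  // reduction cost instead of the sum of the ext, mul and add costs.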
6769 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6770 Instruction *ReductionPhi = LastChain; 6771 while (!isa<PHINode>(ReductionPhi)) 6772 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6773 6774 RecurrenceDescriptor RdxDesc = 6775 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 6776 unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), 6777 VectorTy, false, CostKind); 6778 6779 // Get the operand that was not the reduction chain and match it to one of the 6780 // patterns, returning the better cost if it is found. 6781 Instruction *RedOp = RetI->getOperand(1) == LastChain 6782 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6783 : dyn_cast<Instruction>(RetI->getOperand(1)); 6784 6785 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6786 6787 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 6788 !TheLoop->isLoopInvariant(RedOp)) { 6789 bool IsUnsigned = isa<ZExtInst>(RedOp); 6790 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6791 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6792 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6793 CostKind); 6794 6795 unsigned ExtCost = 6796 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6797 TTI::CastContextHint::None, CostKind, RedOp); 6798 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6799 return I == RetI ? *RedCost.getValue() : 0; 6800 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 6801 Instruction *Mul = RedOp; 6802 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 6803 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 6804 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 6805 Op0->getOpcode() == Op1->getOpcode() && 6806 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6807 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6808 bool IsUnsigned = isa<ZExtInst>(Op0); 6809 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6810 // reduce(mul(ext, ext)) 6811 unsigned ExtCost = 6812 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 6813 TTI::CastContextHint::None, CostKind, Op0); 6814 unsigned MulCost = 6815 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6816 6817 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6818 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6819 CostKind); 6820 6821 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 6822 return I == RetI ? *RedCost.getValue() : 0; 6823 } else { 6824 unsigned MulCost = 6825 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6826 6827 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6828 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6829 CostKind); 6830 6831 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6832 return I == RetI ? *RedCost.getValue() : 0; 6833 } 6834 } 6835 6836 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 6837 } 6838 6839 InstructionCost 6840 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6841 ElementCount VF) { 6842 // Calculate scalar cost only. Vectorization cost should be ready at this 6843 // moment. 
  if (VF.isScalar()) {
    Type *ValTy = getMemInstValueType(I);
    const Align Alignment = getLoadStoreAlignment(I);
    unsigned AS = getLoadStoreAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
                               TTI::TCK_RecipThroughput, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                               ElementCount VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = ElementCount::getFixed(1);

  if (VF.isVector() && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.count(I))
      return VectorizationCostTy(
          (getInstructionCost(I, ElementCount::getFixed(1)).first *
           VF.getKnownMinValue()),
          false);
  }

  Type *VectorTy;
  InstructionCost C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF.isVector() && VectorTy->isVectorTy() &&
      TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
  return VectorizationCostTy(C, TypeNotScalarized);
}

InstructionCost
LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
                                                     ElementCount VF) {

  if (VF.isScalable())
    return InstructionCost::getInvalid();

  if (VF.isScalar())
    return 0;

  InstructionCost Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(
        cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
        true, false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  // Some targets support efficient element stores.
  if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
    return Cost;

  // Collect operands to consider.
  CallInst *CI = dyn_cast<CallInst>(I);
  Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();

  // Skip operands that do not require extraction/scalarization and do not
  // incur any overhead.
  SmallVector<Type *> Tys;
  for (auto *V : filterExtractingOperands(Ops, VF))
    Tys.push_back(MaybeVectorizeType(V->getType(), VF));
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), Tys);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
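      // For example (illustrative, not taken from the test file): in a loop
      // body like 'if (cond[i]) *p = x;' where p is loop-invariant, the store
      // is uniform yet predicated, and is counted below like any other
      // predicated store.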
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        InstructionCost Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride =
            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = InstructionCost::getInvalid();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
                           : InstructionCost::getInvalid();

      // Choose the best solution for the current VF, write down this
      // decision, and use it during vectorization.
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        assert(!VF.isScalable() &&
               "We cannot yet scalarise for scalable vectors");
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
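    // For example (illustrative IR, VF=4): each lane's guard looks roughly
    // like
    //   %c  = icmp ult <4 x i32> %iv, %bound
    //   %c0 = extractelement <4 x i1> %c, i32 0
    //   br i1 %c0, label %pred.store.if, label %pred.store.continue
    // and similarly for lanes 1..3, which is what the scalarization overhead
    // plus VF branch costs below account for.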
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Return cost for branches around scalarized and predicated blocks.
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (TTI.getScalarizationOverhead(
                  Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
                  false, true) +
              (TTI.getCFInstrCost(Instruction::Br, CostKind) *
               VF.getKnownMinValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
      return 0;
    // Note: We currently assume zero cost for an unconditional branch inside
    // a predicated block since it will become a fall-through, although we
    // may decide in the future to call TTI for all branches.
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
    if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(
          TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
          VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));

    // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
    // converted into select instructions. We require N - 1 selects per phi
    // node, where N is the number of incoming values.
    if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
      return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
                 Instruction::Select, ToVectorTy(Phi->getType(), VF),
                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                 CmpInst::BAD_ICMP_PREDICATE, CostKind);

    return TTI.getCFInstrCost(Instruction::PHI, CostKind);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF.isVector() && isScalarWithPredication(I)) {
      InstructionCost Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF.getKnownMinValue() *
              TTI.getCFInstrCost(Instruction::PHI, CostKind);

      // The cost of the non-predicated instruction.
      Cost += VF.getKnownMinValue() *
              TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride with 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;

    // Detect reduction patterns.
    InstructionCost RedCost;
    if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
            .isValid())
      return RedCost;

    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this are shifts on x86.
    Value *Op2 = I->getOperand(1);
    TargetTransformInfo::OperandValueProperties Op2VP;
    TargetTransformInfo::OperandValueKind Op2VK =
        TTI.getOperandInfo(Op2, Op2VP);
    if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
      Op2VK = TargetTransformInfo::OK_UniformValue;

    SmallVector<const Value *, 4> Operands(I->operand_values());
    unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
    return N * TTI.getArithmeticInstrCost(
                   I->getOpcode(), VectorTy, CostKind,
                   TargetTransformInfo::OK_AnyValue,
                   Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
  }
  case Instruction::FNeg: {
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
    return N * TTI.getArithmeticInstrCost(
                   I->getOpcode(), VectorTy, CostKind,
                   TargetTransformInfo::OK_AnyValue,
                   TargetTransformInfo::OK_AnyValue,
                   TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
                   I->getOperand(0), I);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
                                  CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                  CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
  }
  case Instruction::Store:
  case Instruction::Load: {
    ElementCount Width = VF;
    if (Width.isVector()) {
      InstWidening Decision = getWideningDecision(I, Width);
      assert(Decision != CM_Unknown &&
             "CM decision should be taken at this point");
      if (Decision == CM_Scalarize)
        Width = ElementCount::getFixed(1);
    }
    VectorTy = ToVectorTy(getMemInstValueType(I), Width);
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // Computes the CastContextHint from a Load/Store instruction.
    auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
      assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
             "Expected a load or a store!");

      if (VF.isScalar() || !TheLoop->contains(I))
        return TTI::CastContextHint::Normal;

      switch (getWideningDecision(I, VF)) {
      case LoopVectorizationCostModel::CM_GatherScatter:
        return TTI::CastContextHint::GatherScatter;
      case LoopVectorizationCostModel::CM_Interleave:
        return TTI::CastContextHint::Interleave;
      case LoopVectorizationCostModel::CM_Scalarize:
      case LoopVectorizationCostModel::CM_Widen:
        return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
                                        : TTI::CastContextHint::Normal;
      case LoopVectorizationCostModel::CM_Widen_Reverse:
        return TTI::CastContextHint::Reversed;
      case LoopVectorizationCostModel::CM_Unknown:
        llvm_unreachable("Instr did not go through cost modelling?");
      }

      llvm_unreachable("Unhandled case!");
    };

    unsigned Opcode = I->getOpcode();
    TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc, the context is the only user, which must be a StoreInst.
    if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
      if (I->hasOneUse())
        if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
          CCH = ComputeCCH(Store);
    }
    // For Z/Sext, the context is the operand, which must be a LoadInst.
    else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
             Opcode == Instruction::FPExt) {
      if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
        CCH = ComputeCCH(Load);
    }

    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), CCH, CostKind, Trunc);
    }

    // Detect reduction patterns.
    InstructionCost RedCost;
    if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
            .isValid())
      return RedCost;

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy =
        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (Opcode == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    unsigned N;
    if (isScalarAfterVectorization(I, VF)) {
      assert(!VF.isScalable() && "VF is assumed to be non scalable");
      N = VF.getKnownMinValue();
    } else
      N = 1;
    return N *
           TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI)) {
      InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
      return std::min(CallCost, IntrinsicCost);
    }
    return CallCost;
  }
  case Instruction::ExtractValue:
    return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
                                       Instruction::Mul, VectorTy, CostKind) +
           getScalarizationOverhead(I, VF);
  } // end of switch.
}

char LoopVectorize::ID = 0;

static const char lv_name[] = "Loop Vectorization";

INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {

Pass *createLoopVectorizePass() { return new LoopVectorize(); }

Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
                              bool VectorizeOnlyWhenForced) {
  return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
}

} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getLoadStorePointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
  // Ignore type-casting instructions we identified during induction
  // detection.
  for (auto &Induction : Legal->getInductionVars()) {
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

void LoopVectorizationCostModel::collectInLoopReductions() {
  for (auto &Reduction : Legal->getReductionVars()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Reduction.second;

    // We don't collect reductions that are type promoted (yet).
    if (RdxDesc.getRecurrenceType() != Phi->getType())
      continue;

    // If the target would prefer this reduction to happen "in-loop", then we
    // want to record it as such.
    unsigned Opcode = RdxDesc.getOpcode();
    if (!PreferInLoopReductions &&
        !TTI.preferInLoopReduction(Opcode, Phi->getType(),
                                   TargetTransformInfo::ReductionFlags()))
      continue;

    // Check that we can correctly put the reductions into the loop, by
    // finding the chain of operations that leads from the phi to the loop
    // exit value.
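    // Rough sketch (illustrative): an "in-loop" add reduction keeps a scalar
    // accumulator and reduces each vector of inputs inside the loop, e.g.
    //   %part = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %vals)
    //   %acc.next = add i32 %acc, %part
    // whereas an out-of-loop reduction widens the phi and performs a single
    // reduction of the vector accumulator after the loop.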
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(
          determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
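  // Sketch of the issue (illustrative): when the tail is folded by masking,
  // every memory access executes under the header mask, so a factor-2 group
  // would need a masked wide load along the lines of
  //   call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(..., <8 x i1> %mask, ...)
  // with the mask replicated per member; without target support for
  // masked-interleaved accesses the groups must be broken up.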
  if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all
      // decisions based on them, which includes widening decisions and
      // uniform and scalar values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF.isNonZero() && "MaxVF is zero.");

  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
  if (!UserVF.isZero() &&
      (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
    ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
    LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
                      << " VF " << VF << ".\n");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(VF);
    CM.collectInLoopReductions();
    buildVPlansWithVPRecipes(VF, VF);
    LLVM_DEBUG(printPlans(dbgs()));
    return {{VF, 0}};
  }

  assert(!MaxVF.isScalable() &&
         "Scalable vectors not yet supported beyond this point");

  for (ElementCount VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();

  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
  LLVM_DEBUG(printPlans(dbgs()));
  if (MaxVF.isScalar())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}

void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) { return !Plan->hasVF(VF); });
  assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  assert(BestVF.hasValue() && "Vectorization Factor is missing");
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");

  VPTransformState State{
      *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.TripCount = ILV.getOrCreateTripCount(nullptr);
  State.CanonicalIV = ILV.Induction;

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  //    predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop.
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // The operand of the icmp is often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general.
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);

    // We record as "Dead" also the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime
    // test guarding the vectorized loop, the value of the phi, and the casted
    // value of the phi, are the same. The last instruction in this casting
    // chain will get its scalar/vector/widened def from the
    // scalar/vector/widened def of the respective phi node. Any other casts in
    // the induction def-use chain have no other uses outside the phi update
    // chain, and will be ignored.
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    DeadInstructions.insert(Casts.begin(), Casts.end());
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("");

  // Generate the code to check the minimum iteration count of the vector
  // epilogue (see below).
  EPI.EpilogueIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
  EPI.EpilogueIterationCountCheck->setName("iter.check");

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
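  // For example (illustrative): if vectorization assumed that a symbolic
  // stride is 1, the emitted SCEV check compares the stride against 1 at
  // runtime and bypasses to the scalar loop when the assumption fails.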
  BasicBlock *SavedPreHeader = LoopVectorPreHeader;
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // If a safety check was generated, save it.
  if (SavedPreHeader != LoopVectorPreHeader)
    EPI.SCEVSafetyCheck = SavedPreHeader;

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  SavedPreHeader = LoopVectorPreHeader;
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // If a safety check was generated, save/overwrite it.
  if (SavedPreHeader != LoopVectorPreHeader)
    EPI.MemSafetyCheck = SavedPreHeader;

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length for
  // the main loop is compensated for by the gain from vectorizing the larger
  // trip count. Note: the branch will get updated later on when we vectorize
  // the epilogue.
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Skip induction resume value creation here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.

  return completeLoopSkeleton(Lp, OrigLoopID);
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  unsigned VFactor =
      ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the main vector loop.
  auto P =
      Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
      "min.iters.check");

  if (!ForEpilogue)
    TCCheckBlock->setName("vector.main.loop.iter.check");

  // Create a new preheader for the vector loop.
  LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
                                   DT, LI, nullptr, "vector.ph");

  if (ForEpilogue) {
    assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                                 DT->getNode(Bypass)->getIDom()) &&
           "TC check is expected to dominate Bypass");

    // Update dominator for Bypass & LoopExit.
    DT->changeImmediateDominator(Bypass, TCCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

    LoopBypassBlocks.push_back(TCCheckBlock);

    // Save the trip count so we don't have to regenerate it in the
    // vec.epilog.iter.check. This is safe to do because the trip count
    // generated here dominates the vector epilog iter check.
    EPI.TripCount = Count;
  }

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  return TCCheckBlock;
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerEpilogueLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *
EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("vec.epilog.");

  // Now, compare the remaining count: if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
  BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
  VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
  LoopVectorPreHeader =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, "vec.epilog.ph");
  emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
                                          VecEpilogueIterationCountCheck);

  // Adjust the control flow taking the state info from the main loop
  // vectorization into account.
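  // Rough sketch of the flow being stitched together (simplified from the
  // diagram referenced above):
  //   - iter.check: too few iterations even for the epilogue VF? -> scalar.ph
  //   - vector.main.loop.iter.check: too few for the main VF? -> vec.epilog.ph
  //   - main vector loop -> vec.epilog.iter.check: remainder too small?
  //     -> scalar.ph, otherwise -> vec.epilog.ph -> epilogue vector loop
  //     -> scalar.ph -> scalar remainder loop.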
  assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
         "expected this to be saved from the previous pass.");
  EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopVectorPreHeader);

  DT->changeImmediateDominator(LoopVectorPreHeader,
                               EPI.MainLoopIterationCountCheck);

  EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  if (EPI.SCEVSafetyCheck)
    EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  if (EPI.MemSafetyCheck)
    EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  DT->changeImmediateDominator(
      VecEpilogueIterationCountCheck,
      VecEpilogueIterationCountCheck->getSinglePredecessor());

  DT->changeImmediateDominator(LoopScalarPreHeader,
                               EPI.EpilogueIterationCountCheck);
  DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);

  // Keep track of bypass blocks, as they feed start values to the induction
  // phis in the scalar loop preheader.
  if (EPI.SCEVSafetyCheck)
    LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  if (EPI.MemSafetyCheck)
    LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *StartIdx = EPResumeVal;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P =
      Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count,
      ConstantInt::get(Count->getType(),
                       EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
      "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for a cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
  if (ECEntryIt != EdgeMaskCache.end())
    return ECEntryIt->second;

  VPValue *SrcMask = createBlockInMask(Src, Plan);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
    return EdgeMaskCache[Edge] = SrcMask;

  // If the source is an exiting block, we know the exit edge is dynamically
  // dead in the vector loop, and thus we don't need to restrict the mask.
  // Avoid adding uses of an otherwise potentially dead instruction.
  if (OrigLoop->isLoopExiting(Src))
    return EdgeMaskCache[Edge] = SrcMask;

  VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
  assert(EdgeMask && "No Edge Mask found for condition");

  if (BI->getSuccessor(0) != Dst)
    EdgeMask = Builder.createNot(EdgeMask);

  if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
    // The condition is 'SrcMask && EdgeMask', which is equivalent to
    // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
    // The select version does not introduce new UB if SrcMask is false and
    // EdgeMask is poison. Using 'and' here introduces undefined behavior.
    VPValue *False = Plan->getOrAddVPValue(
        ConstantInt::getFalse(BI->getCondition()->getType()));
    EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
  }

  return EdgeMaskCache[Edge] = EdgeMask;
}

VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // Look for a cached value.
  BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
  if (BCEntryIt != BlockMaskCache.end())
    return BCEntryIt->second;

  // An all-one mask is modelled as no-mask, following the convention for
  // masked load/store/gather/scatter. Initialize BlockMask to no-mask.
  VPValue *BlockMask = nullptr;

  if (OrigLoop->getHeader() == BB) {
    if (!CM.blockNeedsPredication(BB))
      return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.

    // Create the block-in mask as the first non-phi instruction in the block.
    VPBuilder::InsertPointGuard Guard(Builder);
    auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
    Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);

    // Introduce the early-exit compare IV <= BTC to form the header block
    // mask. This is used instead of IV < TC because TC may wrap, unlike BTC.
    // Start by constructing the desired canonical IV.
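    // For example (illustrative): with an i8 IV and a trip count of 256, TC
    // wraps to 0 and 'IV < TC' would always be false, whereas BTC == 255 and
    // 'IV <= BTC' yields the correct lane mask.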
    VPValue *IV = nullptr;
    if (Legal->getPrimaryInduction())
      IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
    else {
      auto IVRecipe = new VPWidenCanonicalIVRecipe();
      Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
      IV = IVRecipe->getVPValue();
    }
    VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
    bool TailFolded = !CM.isScalarEpilogueAllowed();

    if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop tripcount
      // as a second argument, we only pass the IV here and extract the
      // tripcount from the transform state where codegen of the VP
      // instructions happens.
      BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
    } else {
      BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
    }
    return BlockMaskCache[BB] = BlockMask;
  }

  // This is the block mask. We OR all incoming edges.
  for (auto *Predecessor : predecessors(BB)) {
    VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
    if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
      return BlockMaskCache[BB] = EdgeMask;

    if (!BlockMask) { // BlockMask has its initialized nullptr value.
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask);
  }

  return BlockMaskCache[BB] = BlockMask;
}

VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
                                                VPlanPtr &Plan) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Must be called with either a load or store");

  auto willWiden = [&](ElementCount VF) -> bool {
    if (VF.isScalar())
      return false;
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    if (Decision == LoopVectorizationCostModel::CM_Interleave)
      return true;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
  if (LoadInst *Load = dyn_cast<LoadInst>(I))
    return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);

  StoreInst *Store = cast<StoreInst>(I);
  VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
  return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
}

VPWidenIntOrFpInductionRecipe *
VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const {
  // Check if this is an integer or fp induction. If so, build the recipe that
  // produces its scalar and vector values.
8213 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8214 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8215 II.getKind() == InductionDescriptor::IK_FpInduction) { 8216 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8217 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8218 return new VPWidenIntOrFpInductionRecipe( 8219 Phi, Start, Casts.empty() ? nullptr : Casts.front()); 8220 } 8221 8222 return nullptr; 8223 } 8224 8225 VPWidenIntOrFpInductionRecipe * 8226 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range, 8227 VPlan &Plan) const { 8228 // Optimize the special case where the source is a constant integer 8229 // induction variable. Notice that we can only optimize the 'trunc' case 8230 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8231 // (c) other casts depend on pointer size. 8232 8233 // Determine whether \p K is a truncation based on an induction variable that 8234 // can be optimized. 8235 auto isOptimizableIVTruncate = 8236 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8237 return [=](ElementCount VF) -> bool { 8238 return CM.isOptimizableIVTruncate(K, VF); 8239 }; 8240 }; 8241 8242 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8243 isOptimizableIVTruncate(I), Range)) { 8244 8245 InductionDescriptor II = 8246 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8247 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8248 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8249 Start, nullptr, I); 8250 } 8251 return nullptr; 8252 } 8253 8254 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 8255 // We know that all PHIs in non-header blocks are converted into selects, so 8256 // we don't have to worry about the insertion order and we can just use the 8257 // builder. At this point we generate the predication tree. There may be 8258 // duplications since this is a simple recursive scan, but future 8259 // optimizations will clean it up. 8260 8261 SmallVector<VPValue *, 2> Operands; 8262 unsigned NumIncoming = Phi->getNumIncomingValues(); 8263 for (unsigned In = 0; In < NumIncoming; In++) { 8264 VPValue *EdgeMask = 8265 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8266 assert((EdgeMask || NumIncoming == 1) && 8267 "Multiple predecessors with one having a full mask"); 8268 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 8269 if (EdgeMask) 8270 Operands.push_back(EdgeMask); 8271 } 8272 return new VPBlendRecipe(Phi, Operands); 8273 } 8274 8275 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 8276 VPlan &Plan) const { 8277 8278 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8279 [this, CI](ElementCount VF) { 8280 return CM.isScalarWithPredication(CI, VF); 8281 }, 8282 Range); 8283 8284 if (IsPredicated) 8285 return nullptr; 8286 8287 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8288 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8289 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8290 ID == Intrinsic::pseudoprobe || 8291 ID == Intrinsic::experimental_noalias_scope_decl)) 8292 return nullptr; 8293 8294 auto willWiden = [&](ElementCount VF) -> bool { 8295 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8296 // The following case may be scalarized depending on the VF. 
8297     // The flag shows whether we use an Intrinsic or a usual Call for the
8298     // vectorized version of the instruction.
8299     // Is it beneficial to perform the intrinsic call compared to the lib call?
8300     bool NeedToScalarize = false;
8301     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8302     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8303     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8304     assert(IntrinsicCost.isValid() && CallCost.isValid() &&
8305            "Cannot have invalid costs while widening");
8306     return UseVectorIntrinsic || !NeedToScalarize;
8307   };
8308
8309   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8310     return nullptr;
8311
8312   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
8313 }
8314
8315 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8316   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8317          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8318   // Instruction should be widened, unless it is scalar after vectorization,
8319   // scalarization is profitable, or it is predicated.
8320   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8321     return CM.isScalarAfterVectorization(I, VF) ||
8322            CM.isProfitableToScalarize(I, VF) ||
8323            CM.isScalarWithPredication(I, VF);
8324   };
8325   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8326                                                              Range);
8327 }
8328
8329 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
8330   auto IsVectorizableOpcode = [](unsigned Opcode) {
8331     switch (Opcode) {
8332     case Instruction::Add:
8333     case Instruction::And:
8334     case Instruction::AShr:
8335     case Instruction::BitCast:
8336     case Instruction::FAdd:
8337     case Instruction::FCmp:
8338     case Instruction::FDiv:
8339     case Instruction::FMul:
8340     case Instruction::FNeg:
8341     case Instruction::FPExt:
8342     case Instruction::FPToSI:
8343     case Instruction::FPToUI:
8344     case Instruction::FPTrunc:
8345     case Instruction::FRem:
8346     case Instruction::FSub:
8347     case Instruction::ICmp:
8348     case Instruction::IntToPtr:
8349     case Instruction::LShr:
8350     case Instruction::Mul:
8351     case Instruction::Or:
8352     case Instruction::PtrToInt:
8353     case Instruction::SDiv:
8354     case Instruction::Select:
8355     case Instruction::SExt:
8356     case Instruction::Shl:
8357     case Instruction::SIToFP:
8358     case Instruction::SRem:
8359     case Instruction::Sub:
8360     case Instruction::Trunc:
8361     case Instruction::UDiv:
8362     case Instruction::UIToFP:
8363     case Instruction::URem:
8364     case Instruction::Xor:
8365     case Instruction::ZExt:
8366       return true;
8367     }
8368     return false;
8369   };
8370
8371   if (!IsVectorizableOpcode(I->getOpcode()))
8372     return nullptr;
8373
8374   // Success: widen this instruction.
8375 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 8376 } 8377 8378 VPBasicBlock *VPRecipeBuilder::handleReplication( 8379 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8380 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8381 VPlanPtr &Plan) { 8382 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8383 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8384 Range); 8385 8386 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8387 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8388 Range); 8389 8390 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8391 IsUniform, IsPredicated); 8392 setRecipe(I, Recipe); 8393 Plan->addVPValue(I, Recipe); 8394 8395 // Find if I uses a predicated instruction. If so, it will use its scalar 8396 // value. Avoid hoisting the insert-element which packs the scalar value into 8397 // a vector value, as that happens iff all users use the vector value. 8398 for (auto &Op : I->operands()) 8399 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8400 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8401 PredInst2Recipe[PredInst]->setAlsoPack(false); 8402 8403 // Finalize the recipe for Instr, first if it is not predicated. 8404 if (!IsPredicated) { 8405 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8406 VPBB->appendRecipe(Recipe); 8407 return VPBB; 8408 } 8409 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8410 assert(VPBB->getSuccessors().empty() && 8411 "VPBB has successors when handling predicated replication."); 8412 // Record predicated instructions for above packing optimizations. 8413 PredInst2Recipe[I] = Recipe; 8414 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8415 VPBlockUtils::insertBlockAfter(Region, VPBB); 8416 auto *RegSucc = new VPBasicBlock(); 8417 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8418 return RegSucc; 8419 } 8420 8421 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8422 VPRecipeBase *PredRecipe, 8423 VPlanPtr &Plan) { 8424 // Instructions marked for predication are replicated and placed under an 8425 // if-then construct to prevent side-effects. 8426 8427 // Generate recipes to compute the block mask for this region. 8428 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8429 8430 // Build the triangular if-then region. 8431 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8432 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8433 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8434 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8435 auto *PHIRecipe = Instr->getType()->isVoidTy() 8436 ? nullptr 8437 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8438 if (PHIRecipe) { 8439 Plan->removeVPValueFor(Instr); 8440 Plan->addVPValue(Instr, PHIRecipe); 8441 } 8442 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8443 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8444 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8445 8446 // Note: first set Entry as region entry and then connect successors starting 8447 // from it in order, to propagate the "parent" of each VPBasicBlock. 
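  //
  // Conceptually, the region built here forms the triangle:
  //
  //     <RegionName>.entry       (VPBranchOnMaskRecipe on BlockInMask)
  //        |          \
  //        |     <RegionName>.if (PredRecipe, the replicated instruction)
  //        |          /
  //     <RegionName>.continue    (VPPredInstPHIRecipe, for non-void Instr)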
8448   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8449   VPBlockUtils::connectBlocks(Pred, Exit);
8450
8451   return Region;
8452 }
8453
8454 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8455                                                       VFRange &Range,
8456                                                       VPlanPtr &Plan) {
8457   // First, check for specific widening recipes that deal with calls, memory
8458   // operations, inductions and Phi nodes.
8459   if (auto *CI = dyn_cast<CallInst>(Instr))
8460     return tryToWidenCall(CI, Range, *Plan);
8461
8462   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8463     return tryToWidenMemory(Instr, Range, Plan);
8464
8465   VPRecipeBase *Recipe;
8466   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8467     if (Phi->getParent() != OrigLoop->getHeader())
8468       return tryToBlend(Phi, Plan);
8469     if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8470       return Recipe;
8471
8472     if (Legal->isReductionVariable(Phi)) {
8473       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8474       VPValue *StartV =
8475           Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8476       return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8477     }
8478
8479     return new VPWidenPHIRecipe(Phi);
8480   }
8481
8482   if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8483                                     cast<TruncInst>(Instr), Range, *Plan)))
8484     return Recipe;
8485
8486   if (!shouldWiden(Instr, Range))
8487     return nullptr;
8488
8489   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8490     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8491                                 OrigLoop);
8492
8493   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8494     bool InvariantCond =
8495         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8496     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8497                                    InvariantCond);
8498   }
8499
8500   return tryToWiden(Instr, *Plan);
8501 }
8502
8503 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8504                                                         ElementCount MaxVF) {
8505   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8506
8507   // Collect instructions from the original loop that will become trivially
8508   // dead in the vectorized loop. We don't need to vectorize these
8509   // instructions. For example, original induction update instructions can
8510   // become dead because we separately emit induction "steps" when generating
8511   // code for the new loop. Similarly, we create a new latch condition when
8512   // setting up the structure of the new loop, so the old one can become dead.
8513   SmallPtrSet<Instruction *, 4> DeadInstructions;
8514   collectTriviallyDeadInstructions(DeadInstructions);
8515
8516   // Add assume instructions we need to drop to DeadInstructions, to prevent
8517   // them from being added to the VPlan.
8518   // TODO: We only need to drop assumes in blocks that get flattened. If the
8519   // control flow is preserved, we should keep them.
8520   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8521   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8522
8523   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8524   // Dead instructions do not need sinking. Remove them from SinkAfter.
8525   for (Instruction *I : DeadInstructions)
8526     SinkAfter.erase(I);
8527
8528   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8529   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8530     VFRange SubRange = {VF, MaxVFPlusOne};
8531     VPlans.push_back(
8532         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8533     VF = SubRange.End;
8534   }
8535 }
8536
8537 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8538     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8539     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8540
8541   // Hold a mapping from predicated instructions to their recipes, in order to
8542   // fix their AlsoPack behavior if a user is determined to replicate and use a
8543   // scalar instead of a vector value.
8544   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8545
8546   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8547
8548   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8549
8550   // ---------------------------------------------------------------------------
8551   // Pre-construction: record ingredients whose recipes we'll need to further
8552   // process after constructing the initial VPlan.
8553   // ---------------------------------------------------------------------------
8554
8555   // Mark instructions we'll need to sink later and their targets as
8556   // ingredients whose recipe we'll need to record.
8557   for (auto &Entry : SinkAfter) {
8558     RecipeBuilder.recordRecipeOf(Entry.first);
8559     RecipeBuilder.recordRecipeOf(Entry.second);
8560   }
8561   for (auto &Reduction : CM.getInLoopReductionChains()) {
8562     PHINode *Phi = Reduction.first;
8563     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8564     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8565
8566     RecipeBuilder.recordRecipeOf(Phi);
8567     for (auto &R : ReductionOperations) {
8568       RecipeBuilder.recordRecipeOf(R);
8569       // For min/max reductions, where we have a pair of icmp/select, we also
8570       // need to record the ICmp recipe, so it can be removed later.
8571       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8572         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8573     }
8574   }
8575
8576   // For each interleave group which is relevant for this (possibly trimmed)
8577   // Range, add it to the set of groups to be later applied to the VPlan and add
8578   // placeholders for its members' Recipes which we'll be replacing with a
8579   // single VPInterleaveRecipe.
8580   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8581     auto applyIG = [IG, this](ElementCount VF) -> bool {
8582       return (VF.isVector() && // Query is illegal for VF == 1
8583               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8584                   LoopVectorizationCostModel::CM_Interleave);
8585     };
8586     if (!getDecisionAndClampRange(applyIG, Range))
8587       continue;
8588     InterleaveGroups.insert(IG);
8589     for (unsigned i = 0; i < IG->getFactor(); i++)
8590       if (Instruction *Member = IG->getMember(i))
8591         RecipeBuilder.recordRecipeOf(Member);
8592   }
8593
8594   // ---------------------------------------------------------------------------
8595   // Build initial VPlan: Scan the body of the loop in a topological order to
8596   // visit each basic block after having visited its predecessor basic blocks.
8597   // ---------------------------------------------------------------------------
8598
8599   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8600   auto Plan = std::make_unique<VPlan>();
8601   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8602   Plan->setEntry(VPBB);
8603
8604   // Scan the body of the loop in a topological order to visit each basic block
8605   // after having visited its predecessor basic blocks.
8606   LoopBlocksDFS DFS(OrigLoop);
8607   DFS.perform(LI);
8608
8609   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8610     // Relevant instructions from basic block BB will be grouped into VPRecipe
8611     // ingredients and fill a new VPBasicBlock.
8612     unsigned VPBBsForBB = 0;
8613     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8614     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8615     VPBB = FirstVPBBForBB;
8616     Builder.setInsertPoint(VPBB);
8617
8618     // Introduce each ingredient into VPlan.
8619     // TODO: Model and preserve debug intrinsics in VPlan.
8620     for (Instruction &I : BB->instructionsWithoutDebug()) {
8621       Instruction *Instr = &I;
8622
8623       // First filter out irrelevant instructions, to ensure no recipes are
8624       // built for them.
8625       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8626         continue;
8627
8628       if (auto Recipe =
8629               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8630
8631         // VPBlendRecipes with a single incoming (value, mask) pair are no-ops.
8632         // Use the incoming value directly.
8633         if (isa<VPBlendRecipe>(Recipe) && Recipe->getNumOperands() <= 2) {
8634           Plan->removeVPValueFor(Instr);
8635           Plan->addVPValue(Instr, Recipe->getOperand(0));
8636           delete Recipe;
8637           continue;
8638         }
8639         for (auto *Def : Recipe->definedValues()) {
8640           auto *UV = Def->getUnderlyingValue();
8641           Plan->addVPValue(UV, Def);
8642         }
8643
8644         RecipeBuilder.setRecipe(Instr, Recipe);
8645         VPBB->appendRecipe(Recipe);
8646         continue;
8647       }
8648
8649       // Otherwise, if all widening options failed, Instruction is to be
8650       // replicated. This may create a successor for VPBB.
8651       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8652           Instr, Range, VPBB, PredInst2Recipe, Plan);
8653       if (NextVPBB != VPBB) {
8654         VPBB = NextVPBB;
8655         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8656                                     : "");
8657       }
8658     }
8659   }
8660
8661   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8662   // may also be empty, such as the last one, VPBB, reflecting original
8663   // basic blocks with no recipes.
8664   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8665   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8666   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8667   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8668   delete PreEntry;
8669
8670   // ---------------------------------------------------------------------------
8671   // Transform initial VPlan: Apply previously taken decisions, in order, to
8672   // bring the VPlan to its final state.
8673   // ---------------------------------------------------------------------------
8674
8675   // Apply Sink-After legal constraints.
8676   for (auto &Entry : SinkAfter) {
8677     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8678     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8679     // If the target is in a replication region, make sure to move Sink to the
8680     // block after it, not into the replication region itself.
8681 if (auto *Region = 8682 dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) { 8683 if (Region->isReplicator()) { 8684 assert(Region->getNumSuccessors() == 1 && "Expected SESE region!"); 8685 VPBasicBlock *NextBlock = 8686 cast<VPBasicBlock>(Region->getSuccessors().front()); 8687 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8688 continue; 8689 } 8690 } 8691 Sink->moveAfter(Target); 8692 } 8693 8694 // Interleave memory: for each Interleave Group we marked earlier as relevant 8695 // for this VPlan, replace the Recipes widening its memory instructions with a 8696 // single VPInterleaveRecipe at its insertion point. 8697 for (auto IG : InterleaveGroups) { 8698 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8699 RecipeBuilder.getRecipe(IG->getInsertPos())); 8700 SmallVector<VPValue *, 4> StoredValues; 8701 for (unsigned i = 0; i < IG->getFactor(); ++i) 8702 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 8703 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 8704 8705 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8706 Recipe->getMask()); 8707 VPIG->insertBefore(Recipe); 8708 unsigned J = 0; 8709 for (unsigned i = 0; i < IG->getFactor(); ++i) 8710 if (Instruction *Member = IG->getMember(i)) { 8711 if (!Member->getType()->isVoidTy()) { 8712 VPValue *OriginalV = Plan->getVPValue(Member); 8713 Plan->removeVPValueFor(Member); 8714 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8715 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8716 J++; 8717 } 8718 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 8719 } 8720 } 8721 8722 // Adjust the recipes for any inloop reductions. 8723 if (Range.Start.isVector()) 8724 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 8725 8726 // Finally, if tail is folded by masking, introduce selects between the phi 8727 // and the live-out instruction of each reduction, at the end of the latch. 8728 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 8729 Builder.setInsertPoint(VPBB); 8730 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 8731 for (auto &Reduction : Legal->getReductionVars()) { 8732 if (CM.isInLoopReduction(Reduction.first)) 8733 continue; 8734 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 8735 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 8736 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 8737 } 8738 } 8739 8740 std::string PlanName; 8741 raw_string_ostream RSO(PlanName); 8742 ElementCount VF = Range.Start; 8743 Plan->addVF(VF); 8744 RSO << "Initial VPlan for VF={" << VF; 8745 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 8746 Plan->addVF(VF); 8747 RSO << "," << VF; 8748 } 8749 RSO << "},UF>=1"; 8750 RSO.flush(); 8751 Plan->setName(PlanName); 8752 8753 return Plan; 8754 } 8755 8756 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8757 // Outer loop handling: They may require CFG and instruction level 8758 // transformations before even evaluating whether vectorization is profitable. 8759 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8760 // the vectorization pipeline. 
8761   assert(!OrigLoop->isInnermost());
8762   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8763
8764   // Create new empty VPlan
8765   auto Plan = std::make_unique<VPlan>();
8766
8767   // Build hierarchical CFG
8768   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8769   HCFGBuilder.buildHierarchicalCFG();
8770
8771   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8772        VF *= 2)
8773     Plan->addVF(VF);
8774
8775   if (EnableVPlanPredication) {
8776     VPlanPredicator VPP(*Plan);
8777     VPP.predicate();
8778
8779     // Avoid running the transformation to recipes until masked code generation
8780     // in the VPlan-native path is in place.
8781     return Plan;
8782   }
8783
8784   SmallPtrSet<Instruction *, 1> DeadInstructions;
8785   VPlanTransforms::VPInstructionsToVPRecipes(
8786       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8787   return Plan;
8788 }
8789
8790 // Adjust the recipes for any inloop reductions. The chain of instructions
8791 // leading from the loop exit instr to the phi needs to be converted to
8792 // reductions, with one operand being vector and the other being the scalar
8793 // reduction chain.
8794 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8795     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8796   for (auto &Reduction : CM.getInLoopReductionChains()) {
8797     PHINode *Phi = Reduction.first;
8798     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8799     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8800
8801     // ReductionOperations are ordered top-down from the phi's use to the
8802     // LoopExitValue. We keep track of the previous item (the Chain) to tell
8803     // which of the two operands will remain scalar and which will be reduced.
8804     // For minmax the chain will be the select instructions.
8805     Instruction *Chain = Phi;
8806     for (Instruction *R : ReductionOperations) {
8807       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8808       RecurKind Kind = RdxDesc.getRecurrenceKind();
8809
8810       VPValue *ChainOp = Plan->getVPValue(Chain);
8811       unsigned FirstOpId;
8812       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8813         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8814                "Expected to replace a VPWidenSelectSC");
8815         FirstOpId = 1;
8816       } else {
8817         assert(isa<VPWidenRecipe>(WidenRecipe) &&
8818                "Expected to replace a VPWidenSC");
8819         FirstOpId = 0;
8820       }
8821       unsigned VecOpId =
8822           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8823       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8824
8825       auto *CondOp = CM.foldTailByMasking()
8826                          ?
RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8827                          : nullptr;
8828       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8829           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8831       Plan->removeVPValueFor(R);
8832       Plan->addVPValue(R, RedRecipe);
8833       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
8834       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8835       WidenRecipe->eraseFromParent();
8836
8837       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8838         VPRecipeBase *CompareRecipe =
8839             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
8840         assert(isa<VPWidenRecipe>(CompareRecipe) &&
8841                "Expected to replace a VPWidenSC");
8842         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
8843                "Expected no remaining users");
8844         CompareRecipe->eraseFromParent();
8845       }
8846       Chain = R;
8847     }
8848   }
8849 }
8850
8851 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
8852                                VPSlotTracker &SlotTracker) const {
8853   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
8854   IG->getInsertPos()->printAsOperand(O, false);
8855   O << ", ";
8856   getAddr()->printAsOperand(O, SlotTracker);
8857   VPValue *Mask = getMask();
8858   if (Mask) {
8859     O << ", ";
8860     Mask->printAsOperand(O, SlotTracker);
8861   }
8862   for (unsigned i = 0; i < IG->getFactor(); ++i)
8863     if (Instruction *I = IG->getMember(i))
8864       O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i;
8865 }
8866
8867 void VPWidenCallRecipe::execute(VPTransformState &State) {
8868   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
8869                                   *this, State);
8870 }
8871
8872 void VPWidenSelectRecipe::execute(VPTransformState &State) {
8873   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
8874                                     this, *this, InvariantCond, State);
8875 }
8876
8877 void VPWidenRecipe::execute(VPTransformState &State) {
8878   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
8879 }
8880
8881 void VPWidenGEPRecipe::execute(VPTransformState &State) {
8882   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
8883                       *this, State.UF, State.VF, IsPtrLoopInvariant,
8884                       IsIndexLoopInvariant, State);
8885 }
8886
8887 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
8888   assert(!State.Instance && "Int or FP induction being replicated.");
8889   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
8890                                    getTruncInst(), getVPValue(0),
8891                                    getCastValue(), State);
8892 }
8893
8894 void VPWidenPHIRecipe::execute(VPTransformState &State) {
8895   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
8896                                  getStartValue(), this, State);
8897 }
8898
8899 void VPBlendRecipe::execute(VPTransformState &State) {
8900   State.ILV->setDebugLocFromInst(State.Builder, Phi);
8901   // We know that all PHIs in non-header blocks are converted into
8902   // selects, so we don't have to worry about the insertion order and we
8903   // can just use the builder.
8904   // At this point we generate the predication tree. There may be
8905   // duplications since this is a simple recursive scan, but future
8906   // optimizations will clean it up.
8907
8908   unsigned NumIncoming = getNumIncomingValues();
8909
8910   // Generate a sequence of selects of the form:
8911   // SELECT(Mask3, In3,
8912   //        SELECT(Mask2, In2,
8913   //               SELECT(Mask1, In1,
8914   //                      In0)))
8915   // Note that Mask0 is never used: lanes for which no path reaches this phi,
8916   // and which are essentially undef, are taken from In0.
8917   InnerLoopVectorizer::VectorParts Entry(State.UF);
8918   for (unsigned In = 0; In < NumIncoming; ++In) {
8919     for (unsigned Part = 0; Part < State.UF; ++Part) {
8920       // We might have single edge PHIs (blocks) - use an identity
8921       // 'select' for the first PHI operand.
8922       Value *In0 = State.get(getIncomingValue(In), Part);
8923       if (In == 0)
8924         Entry[Part] = In0; // Initialize with the first incoming value.
8925       else {
8926         // Select between the current value and the previous incoming edge
8927         // based on the incoming mask.
8928         Value *Cond = State.get(getMask(In), Part);
8929         Entry[Part] =
8930             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
8931       }
8932     }
8933   }
8934   for (unsigned Part = 0; Part < State.UF; ++Part)
8935     State.set(this, Entry[Part], Part);
8936 }
8937
8938 void VPInterleaveRecipe::execute(VPTransformState &State) {
8939   assert(!State.Instance && "Interleave group being replicated.");
8940   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
8941                                       getStoredValues(), getMask());
8942 }
8943
8944 void VPReductionRecipe::execute(VPTransformState &State) {
8945   assert(!State.Instance && "Reduction being replicated.");
8946   for (unsigned Part = 0; Part < State.UF; ++Part) {
8947     RecurKind Kind = RdxDesc->getRecurrenceKind();
8948     Value *NewVecOp = State.get(getVecOp(), Part);
8949     if (VPValue *Cond = getCondOp()) {
8950       Value *NewCond = State.get(Cond, Part);
8951       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
8952       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
8953           Kind, VecTy->getElementType());
8954       Constant *IdenVec =
8955           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
8956       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
8957       NewVecOp = Select;
8958     }
8959     Value *NewRed =
8960         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
8961     Value *PrevInChain = State.get(getChainOp(), Part);
8962     Value *NextInChain;
8963     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8964       NextInChain =
8965           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
8966                          NewRed, PrevInChain);
8967     } else {
8968       NextInChain = State.Builder.CreateBinOp(
8969           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
8970           PrevInChain);
8971     }
8972     State.set(this, NextInChain, Part);
8973   }
8974 }
8975
8976 void VPReplicateRecipe::execute(VPTransformState &State) {
8977   if (State.Instance) { // Generate a single instance.
8978     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
8979     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
8980                                     *State.Instance, IsPredicated, State);
8981     // Insert scalar instance packing it into a vector.
8982     if (AlsoPack && State.VF.isVector()) {
8983       // If we're constructing lane 0, initialize to start from poison.
8984       if (State.Instance->Lane == 0) {
8985         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
8986         Value *Poison = PoisonValue::get(
8987             VectorType::get(getUnderlyingValue()->getType(), State.VF));
8988         State.set(this, Poison, State.Instance->Part);
8989       }
8990       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
8991     }
8992     return;
8993   }
8994
8995   // Generate scalar instances for all VF lanes of all UF parts, unless the
8996   // instruction is uniform, in which case generate only the first lane for
8997   // each of the UF parts.
8998   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
8999   assert((!State.VF.isScalable() || IsUniform) &&
9000          "Can't scalarize a scalable vector");
9001   for (unsigned Part = 0; Part < State.UF; ++Part)
9002     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9003       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9004                                       VPIteration(Part, Lane), IsPredicated,
9005                                       State);
9006 }
9007
9008 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9009   assert(State.Instance && "Branch on Mask works only on single instance.");
9010
9011   unsigned Part = State.Instance->Part;
9012   unsigned Lane = State.Instance->Lane;
9013
9014   Value *ConditionBit = nullptr;
9015   VPValue *BlockInMask = getMask();
9016   if (BlockInMask) {
9017     ConditionBit = State.get(BlockInMask, Part);
9018     if (ConditionBit->getType()->isVectorTy())
9019       ConditionBit = State.Builder.CreateExtractElement(
9020           ConditionBit, State.Builder.getInt32(Lane));
9021   } else // Block in mask is all-one.
9022     ConditionBit = State.Builder.getTrue();
9023
9024   // Replace the temporary unreachable terminator with a new conditional
9025   // branch, whose two destinations will be set later when they are created.
9026   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9027   assert(isa<UnreachableInst>(CurrentTerminator) &&
9028          "Expected to replace unreachable terminator with conditional branch.");
9029   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9030   CondBr->setSuccessor(0, nullptr);
9031   ReplaceInstWithInst(CurrentTerminator, CondBr);
9032 }
9033
9034 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9035   assert(State.Instance && "Predicated instruction PHI works per instance.");
9036   Instruction *ScalarPredInst =
9037       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9038   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9039   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9040   assert(PredicatingBB && "Predicated block has no single predecessor.");
9041   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9042          "operand must be VPReplicateRecipe");
9043
9044   // By current pack/unpack logic we need to generate only a single phi node:
9045   // if a vector value for the predicated instruction exists at this point, it
9046   // means the instruction has vector users only, and a phi for the vector
9047   // value is needed. In this case the recipe of the predicated instruction is
9048   // marked to also do that packing, thereby "hoisting" the insert-element
9049   // sequence. Otherwise, a phi node for the scalar value is needed.
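  //
  // For example (names illustrative only), the scalar-value case for a
  // predicated i32 load conceptually produces:
  //
  //   pred.load.continue:
  //     %p = phi i32 [ poison, %pred.load.entry ], [ %v, %pred.load.if ]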
9050 unsigned Part = State.Instance->Part; 9051 if (State.hasVectorValue(getOperand(0), Part)) { 9052 Value *VectorValue = State.get(getOperand(0), Part); 9053 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9054 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9055 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9056 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9057 if (State.hasVectorValue(this, Part)) 9058 State.reset(this, VPhi, Part); 9059 else 9060 State.set(this, VPhi, Part); 9061 // NOTE: Currently we need to update the value of the operand, so the next 9062 // predicated iteration inserts its generated value in the correct vector. 9063 State.reset(getOperand(0), VPhi, Part); 9064 } else { 9065 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9066 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9067 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9068 PredicatingBB); 9069 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9070 if (State.hasScalarValue(this, *State.Instance)) 9071 State.reset(this, Phi, *State.Instance); 9072 else 9073 State.set(this, Phi, *State.Instance); 9074 // NOTE: Currently we need to update the value of the operand, so the next 9075 // predicated iteration inserts its generated value in the correct vector. 9076 State.reset(getOperand(0), Phi, *State.Instance); 9077 } 9078 } 9079 9080 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9081 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9082 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 9083 StoredValue ? nullptr : getVPValue(), 9084 getAddr(), StoredValue, getMask()); 9085 } 9086 9087 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9088 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9089 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9090 // for predication. 9091 static ScalarEpilogueLowering getScalarEpilogueLowering( 9092 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9093 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9094 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9095 LoopVectorizationLegality &LVL) { 9096 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9097 // don't look at hints or options, and don't request a scalar epilogue. 9098 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9099 // LoopAccessInfo (due to code dependency and not being able to reliably get 9100 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9101 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9102 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9103 // back to the old way and vectorize with versioning when forced. See D81345.) 
9104   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9105                                                       PGSOQueryType::IRPass) &&
9106                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9107     return CM_ScalarEpilogueNotAllowedOptSize;
9108
9109   // 2) If set, obey the directives
9110   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9111     switch (PreferPredicateOverEpilogue) {
9112     case PreferPredicateTy::ScalarEpilogue:
9113       return CM_ScalarEpilogueAllowed;
9114     case PreferPredicateTy::PredicateElseScalarEpilogue:
9115       return CM_ScalarEpilogueNotNeededUsePredicate;
9116     case PreferPredicateTy::PredicateOrDontVectorize:
9117       return CM_ScalarEpilogueNotAllowedUsePredicate;
9118     }
9119   }
9120
9121   // 3) If set, obey the hints
9122   switch (Hints.getPredicate()) {
9123   case LoopVectorizeHints::FK_Enabled:
9124     return CM_ScalarEpilogueNotNeededUsePredicate;
9125   case LoopVectorizeHints::FK_Disabled:
9126     return CM_ScalarEpilogueAllowed;
9127   }
9128
9129   // 4) if the TTI hook indicates this is profitable, request predication.
9130   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9131                                        LVL.getLAI()))
9132     return CM_ScalarEpilogueNotNeededUsePredicate;
9133
9134   return CM_ScalarEpilogueAllowed;
9135 }
9136
9137 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
9138   // If Values have been set for this Def, return the one relevant for \p Part.
9139   if (hasVectorValue(Def, Part))
9140     return Data.PerPartOutput[Def][Part];
9141
9142   if (!hasScalarValue(Def, {Part, 0})) {
9143     Value *IRV = Def->getLiveInIRValue();
9144     Value *B = ILV->getBroadcastInstrs(IRV);
9145     set(Def, B, Part);
9146     return B;
9147   }
9148
9149   Value *ScalarValue = get(Def, {Part, 0});
9150   // If we aren't vectorizing, we can just copy the scalar map values over
9151   // to the vector map.
9152   if (VF.isScalar()) {
9153     set(Def, ScalarValue, Part);
9154     return ScalarValue;
9155   }
9156
9157   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9158   bool IsUniform = RepR && RepR->isUniform();
9159
9160   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9161   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9162
9163   // Set the insert point after the last scalarized instruction. This
9164   // ensures the insertelement sequence will directly follow the scalar
9165   // definitions.
9166   auto OldIP = Builder.saveIP();
9167   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9168   Builder.SetInsertPoint(&*NewIP);
9169
9170   // However, if we are vectorizing, we need to construct the vector values.
9171   // If the value is known to be uniform after vectorization, we can just
9172   // broadcast the scalar value corresponding to lane zero for each unroll
9173   // iteration. Otherwise, we construct the vector values using
9174   // insertelement instructions. Since the resulting vectors are stored in
9175   // State, we will only generate the insertelements once.
9176   Value *VectorValue = nullptr;
9177   if (IsUniform) {
9178     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9179     set(Def, VectorValue, Part);
9180   } else {
9181     // Initialize packing with insertelements to start from poison.
9182     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
9183     Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
9184     set(Def, Poison, Part);
9185     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9186       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9187     VectorValue = get(Def, Part);
9188   }
9189   Builder.restoreIP(OldIP);
9190   return VectorValue;
9191 }
9192
9193 // Process the loop in the VPlan-native vectorization path. This path builds
9194 // VPlan upfront in the vectorization pipeline, which allows VPlan-to-VPlan
9195 // transformations to be applied from the very beginning without modifying the
9196 // input LLVM IR.
9197 static bool processLoopInVPlanNativePath(
9198     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9199     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9200     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9201     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9202     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9203
9204   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9205     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9206     return false;
9207   }
9208   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9209   Function *F = L->getHeader()->getParent();
9210   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9211
9212   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9213       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9214
9215   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9216                                 &Hints, IAI);
9217   // Use the planner for outer loop vectorization.
9218   // TODO: CM is not used at this point inside the planner. Turn CM into an
9219   // optional argument if we don't need it in the future.
9220   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9221
9222   // Get user vectorization factor.
9223   ElementCount UserVF = Hints.getWidth();
9224
9225   // Plan how to best vectorize, return the best VF and its cost.
9226   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9227
9228   // If we are stress testing VPlan builds, do not attempt to generate vector
9229   // code. Masked vector code generation support will follow soon.
9230   // Also, do not attempt to vectorize if no vector code will be produced.
9231   if (VPlanBuildStressTest || EnableVPlanPredication ||
9232       VectorizationFactor::Disabled() == VF)
9233     return false;
9234
9235   LVP.setBestPlan(VF.Width, 1);
9236
9237   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9238                          &CM, BFI, PSI);
9239   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9240                     << L->getHeader()->getParent()->getName() << "\"\n");
9241   LVP.executePlan(LB, DT);
9242
9243   // Mark the loop as already vectorized to avoid vectorizing again.
9244   Hints.setAlreadyVectorized();
9245
9246   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9247   return true;
9248 }
9249
9250 // Emit a remark if there are stores to floats that required a floating point
9251 // extension. If the vectorized loop was generated with floating point, there
9252 // will be a performance penalty from the conversion overhead and the change in
9253 // the vector width.
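// For example (illustrative): a loop that widens its 'float' data to 'double'
// for the arithmetic and truncates the result back before storing needs fpext
// and fptrunc instructions, halving the number of lanes each vector operation
// can process.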
9254 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9255   SmallVector<Instruction *, 4> Worklist;
9256   for (BasicBlock *BB : L->getBlocks()) {
9257     for (Instruction &Inst : *BB) {
9258       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9259         if (S->getValueOperand()->getType()->isFloatTy())
9260           Worklist.push_back(S);
9261       }
9262     }
9263   }
9264
9265   // Traverse upwards from the floating point stores, searching for floating
9266   // point conversions.
9267   SmallPtrSet<const Instruction *, 4> Visited;
9268   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9269   while (!Worklist.empty()) {
9270     auto *I = Worklist.pop_back_val();
9271     if (!L->contains(I))
9272       continue;
9273     if (!Visited.insert(I).second)
9274       continue;
9275
9276     // Emit a remark if the floating point store required a floating
9277     // point conversion.
9278     // TODO: More work could be done to identify the root cause such as a
9279     // constant or a function return type and point the user to it.
9280     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9281       ORE->emit([&]() {
9282         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9283                                           I->getDebugLoc(), L->getHeader())
9284                << "floating point conversion changes vector width. "
9285                << "Mixed floating point precision requires an up/down "
9286                << "cast that will negatively impact performance.";
9287       });
9288
9289     for (Use &Op : I->operands())
9290       if (auto *OpI = dyn_cast<Instruction>(Op))
9291         Worklist.push_back(OpI);
9292   }
9293 }
9294
9295 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9296     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9297                                !EnableLoopInterleaving),
9298       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9299                               !EnableLoopVectorization) {}
9300
9301 bool LoopVectorizePass::processLoop(Loop *L) {
9302   assert((EnableVPlanNativePath || L->isInnermost()) &&
9303          "VPlan-native path is not enabled. Only process inner loops.");
9304
9305 #ifndef NDEBUG
9306   const std::string DebugLocStr = getDebugLocString(L);
9307 #endif /* NDEBUG */
9308
9309   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9310                     << L->getHeader()->getParent()->getName() << "\" from "
9311                     << DebugLocStr << "\n");
9312
9313   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9314
9315   LLVM_DEBUG(
9316       dbgs() << "LV: Loop hints:"
9317              << " force="
9318              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9319                      ? "disabled"
9320                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9321                             ? "enabled"
9322                             : "?"))
9323              << " width=" << Hints.getWidth()
9324              << " unroll=" << Hints.getInterleave() << "\n");
9325
9326   // Function containing loop
9327   Function *F = L->getHeader()->getParent();
9328
9329   // Looking at the diagnostic output is the only way to determine if a loop
9330   // was vectorized (other than looking at the IR or machine code), so it
9331   // is important to generate an optimization remark for each loop. Most of
9332   // these messages are generated as OptimizationRemarkAnalysis. Remarks
9333   // generated as OptimizationRemark and OptimizationRemarkMissed are
9334   // less verbose reporting vectorized loops and unvectorized loops that may
9335   // benefit from vectorization, respectively.
9336
9337   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9338     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9339     return false;
9340   }
9341
9342   PredicatedScalarEvolution PSE(*SE, *L);
9343
9344   // Check if it is legal to vectorize the loop.
9345 LoopVectorizationRequirements Requirements(*ORE); 9346 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 9347 &Requirements, &Hints, DB, AC, BFI, PSI); 9348 if (!LVL.canVectorize(EnableVPlanNativePath)) { 9349 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 9350 Hints.emitRemarkWithHints(); 9351 return false; 9352 } 9353 9354 // Check the function attributes and profiles to find out if this function 9355 // should be optimized for size. 9356 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9357 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 9358 9359 // Entrance to the VPlan-native vectorization path. Outer loops are processed 9360 // here. They may require CFG and instruction level transformations before 9361 // even evaluating whether vectorization is profitable. Since we cannot modify 9362 // the incoming IR, we need to build VPlan upfront in the vectorization 9363 // pipeline. 9364 if (!L->isInnermost()) 9365 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 9366 ORE, BFI, PSI, Hints); 9367 9368 assert(L->isInnermost() && "Inner loop expected."); 9369 9370 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 9371 // count by optimizing for size, to minimize overheads. 9372 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 9373 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 9374 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 9375 << "This loop is worth vectorizing only if no scalar " 9376 << "iteration overheads are incurred."); 9377 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 9378 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 9379 else { 9380 LLVM_DEBUG(dbgs() << "\n"); 9381 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 9382 } 9383 } 9384 9385 // Check the function attributes to see if implicit floats are allowed. 9386 // FIXME: This check doesn't seem possibly correct -- what if the loop is 9387 // an integer loop and the vector instructions selected are purely integer 9388 // vector instructions? 9389 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 9390 reportVectorizationFailure( 9391 "Can't vectorize when the NoImplicitFloat attribute is used", 9392 "loop not vectorized due to NoImplicitFloat attribute", 9393 "NoImplicitFloat", ORE, L); 9394 Hints.emitRemarkWithHints(); 9395 return false; 9396 } 9397 9398 // Check if the target supports potentially unsafe FP vectorization. 9399 // FIXME: Add a check for the type of safety issue (denormal, signaling) 9400 // for the target we're vectorizing for, to make sure none of the 9401 // additional fp-math flags can help. 9402 if (Hints.isPotentiallyUnsafe() && 9403 TTI->isFPVectorizationPotentiallyUnsafe()) { 9404 reportVectorizationFailure( 9405 "Potentially unsafe FP op prevents vectorization", 9406 "loop not vectorized due to unsafe FP support.", 9407 "UnsafeFP", ORE, L); 9408 Hints.emitRemarkWithHints(); 9409 return false; 9410 } 9411 9412 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 9413 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 9414 9415 // If an override option has been passed in for interleaved accesses, use it. 9416 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 9417 UseInterleaved = EnableInterleavedMemAccesses; 9418 9419 // Analyze interleaved memory accesses. 
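  // As a sketch of what this finds: accesses like A[2*i] and A[2*i+1] in the
  // same iteration form an interleave group with factor 2, which can later be
  // lowered to one wide load plus shuffles instead of two strided accesses.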
9420   if (UseInterleaved) {
9421     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9422   }
9423
9424   // Use the cost model.
9425   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9426                                 F, &Hints, IAI);
9427   CM.collectValuesToIgnore();
9428
9429   // Use the planner for vectorization.
9430   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
9431
9432   // Get user vectorization factor and interleave count.
9433   ElementCount UserVF = Hints.getWidth();
9434   unsigned UserIC = Hints.getInterleave();
9435
9436   // Plan how to best vectorize, return the best VF and its cost.
9437   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9438
9439   VectorizationFactor VF = VectorizationFactor::Disabled();
9440   unsigned IC = 1;
9441
9442   if (MaybeVF) {
9443     VF = *MaybeVF;
9444     // Select the interleave count.
9445     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
9446   }
9447
9448   // Identify the diagnostic messages that should be produced.
9449   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9450   bool VectorizeLoop = true, InterleaveLoop = true;
9451   if (Requirements.doesNotMeet(F, L, Hints)) {
9452     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
9453                          "requirements.\n");
9454     Hints.emitRemarkWithHints();
9455     return false;
9456   }
9457
9458   if (VF.Width.isScalar()) {
9459     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9460     VecDiagMsg = std::make_pair(
9461         "VectorizationNotBeneficial",
9462         "the cost-model indicates that vectorization is not beneficial");
9463     VectorizeLoop = false;
9464   }
9465
9466   if (!MaybeVF && UserIC > 1) {
9467     // Tell the user interleaving was avoided up-front, despite being
9468     // explicitly requested.
9469     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9470                          "interleaving should be avoided up front\n");
9471     IntDiagMsg = std::make_pair(
9472         "InterleavingAvoided",
9473         "Ignoring UserIC, because interleaving was avoided up front");
9474     InterleaveLoop = false;
9475   } else if (IC == 1 && UserIC <= 1) {
9476     // Tell the user interleaving is not beneficial.
9477     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9478     IntDiagMsg = std::make_pair(
9479         "InterleavingNotBeneficial",
9480         "the cost-model indicates that interleaving is not beneficial");
9481     InterleaveLoop = false;
9482     if (UserIC == 1) {
9483       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9484       IntDiagMsg.second +=
9485           " and is explicitly disabled or interleave count is set to 1";
9486     }
9487   } else if (IC > 1 && UserIC == 1) {
9488     // Tell the user interleaving is beneficial, but it is explicitly disabled.
9489     LLVM_DEBUG(
9490         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9491     IntDiagMsg = std::make_pair(
9492         "InterleavingBeneficialButDisabled",
9493         "the cost-model indicates that interleaving is beneficial "
9494         "but is explicitly disabled or interleave count is set to 1");
9495     InterleaveLoop = false;
9496   }
9497
9498   // Override IC if user provided an interleave count.
9499   IC = UserIC > 0 ? UserIC : IC;
9500
9501   // Emit diagnostic messages, if any.
9502   const char *VAPassName = Hints.vectorizeAnalysisPassName();
9503   if (!VectorizeLoop && !InterleaveLoop) {
9504     // Do not vectorize or interleave the loop.
9505     ORE->emit([&]() {
9506       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9507                                       L->getStartLoc(), L->getHeader())
9508              << VecDiagMsg.second;
9509     });
9510     ORE->emit([&]() {
9511       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9512                                       L->getStartLoc(), L->getHeader())
9513              << IntDiagMsg.second;
9514     });
9515     return false;
9516   } else if (!VectorizeLoop && InterleaveLoop) {
9517     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9518     ORE->emit([&]() {
9519       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9520                                         L->getStartLoc(), L->getHeader())
9521              << VecDiagMsg.second;
9522     });
9523   } else if (VectorizeLoop && !InterleaveLoop) {
9524     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9525                       << ") in " << DebugLocStr << '\n');
9526     ORE->emit([&]() {
9527       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9528                                         L->getStartLoc(), L->getHeader())
9529              << IntDiagMsg.second;
9530     });
9531   } else if (VectorizeLoop && InterleaveLoop) {
9532     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9533                       << ") in " << DebugLocStr << '\n');
9534     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9535   }
9536
9537   LVP.setBestPlan(VF.Width, IC);
9538
9539   using namespace ore;
9540   bool DisableRuntimeUnroll = false;
9541   MDNode *OrigLoopID = L->getLoopID();
9542
9543   if (!VectorizeLoop) {
9544     assert(IC > 1 && "interleave count should not be 1 or 0");
9545     // If we decided that it is not profitable to vectorize the loop, then
9546     // interleave it.
9547     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
9548                                BFI, PSI);
9549     LVP.executePlan(Unroller, DT);
9550
9551     ORE->emit([&]() {
9552       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9553                                 L->getHeader())
9554              << "interleaved loop (interleaved count: "
9555              << NV("InterleaveCount", IC) << ")";
9556     });
9557   } else {
9558     // If we decided that it is *profitable* to vectorize the loop, then do it.
9559
9560     // Consider vectorizing the epilogue too if it's profitable.
9561     VectorizationFactor EpilogueVF =
9562         CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
9563     if (EpilogueVF.Width.isVector()) {
9564
9565       // The first pass vectorizes the main loop and creates a scalar epilogue
9566       // to be vectorized by executing the plan (potentially with a different
9567       // factor) again shortly afterwards.
9568       EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
9569                                         EpilogueVF.Width.getKnownMinValue(), 1);
9570       EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
9571                                          &LVL, &CM, BFI, PSI);
9572
9573       LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
9574       LVP.executePlan(MainILV, DT);
9575       ++LoopsVectorized;
9576
9577       simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9578       formLCSSARecursively(*L, *DT, LI, SE);
9579
9580       // Second pass vectorizes the epilogue and adjusts the control flow
9581       // edges from the first pass.
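      // After both passes the overall structure is, conceptually:
      //   main vector loop (MainLoopVF) -> epilogue vector loop (EpilogueVF)
      //   -> scalar remainder, with each stage handling the iterations left
      //   over by the previous one.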
      LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
      EPI.MainLoopVF = EPI.EpilogueVF;
      EPI.MainLoopUF = EPI.EpilogueUF;
      EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                               ORE, EPI, &LVL, &CM, BFI, PSI);
      LVP.executePlan(EpilogILV, DT);
      ++LoopsEpilogueVectorized;

      if (!MainILV.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    } else {
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                             &LVL, &CM, BFI, PSI);
      LVP.executePlan(LB, DT);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling a scalar loop when there are
      // no runtime checks about strides and memory. A scalar loop that is
      // rarely used is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    }

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();
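  // Note: on the default path only innermost loops are collected; the
  // VPlan-native path may additionally collect outer loops that carry an
  // explicit vectorization hint.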
  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}