//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
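//
// As a small illustrative sketch (an editorial example, not taken from the
// papers above): with a vectorization factor (VF) of 4, the scalar loop
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + 1;
//
// is conceptually rewritten so that each 'wide' iteration processes four
// consecutive elements at once,
//
//   for (int i = 0; i + 3 < n; i += 4)
//     a[i..i+3] = b[i..i+3] + <1, 1, 1, 1>;  // one SIMD iteration
//
// with the remaining n % 4 iterations executed by a scalar epilogue loop.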
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 202 // that predication is preferred, and this lists all options. I.e., the 203 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 204 // and predicate the instructions accordingly. 
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if "
                          "tail-folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
Mostly " 274 "useful for getting consistent testing.")); 275 276 static cl::opt<bool> ForceTargetSupportsScalableVectors( 277 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 278 cl::desc( 279 "Pretend that scalable vectors are supported, even if the target does " 280 "not support them. This flag should only be used for testing.")); 281 282 static cl::opt<unsigned> SmallLoopCost( 283 "small-loop-cost", cl::init(20), cl::Hidden, 284 cl::desc( 285 "The cost of a loop that is considered 'small' by the interleaver.")); 286 287 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 288 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 289 cl::desc("Enable the use of the block frequency analysis to access PGO " 290 "heuristics minimizing code growth in cold regions and being more " 291 "aggressive in hot regions.")); 292 293 // Runtime interleave loops for load/store throughput. 294 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 295 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 296 cl::desc( 297 "Enable runtime interleaving until load/store ports are saturated")); 298 299 /// Interleave small loops with scalar reductions. 300 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 301 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 302 cl::desc("Enable interleaving for loops with small iteration counts that " 303 "contain scalar reductions to expose ILP.")); 304 305 /// The number of stores in a loop that are allowed to need predication. 306 static cl::opt<unsigned> NumberOfStoresToPredicate( 307 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 308 cl::desc("Max number of stores to be predicated behind an if.")); 309 310 static cl::opt<bool> EnableIndVarRegisterHeur( 311 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 312 cl::desc("Count the induction variable only once when interleaving")); 313 314 static cl::opt<bool> EnableCondStoresVectorization( 315 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 316 cl::desc("Enable if predication of stores during vectorization.")); 317 318 static cl::opt<unsigned> MaxNestedScalarReductionIC( 319 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 320 cl::desc("The maximum interleave count to use when interleaving a scalar " 321 "reduction in a nested loop.")); 322 323 static cl::opt<bool> 324 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 325 cl::Hidden, 326 cl::desc("Prefer in-loop vector reductions, " 327 "overriding the targets preference.")); 328 329 static cl::opt<bool> PreferPredicatedReductionSelect( 330 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 331 cl::desc( 332 "Prefer predicating a reduction operation over an after loop select.")); 333 334 cl::opt<bool> EnableVPlanNativePath( 335 "enable-vplan-native-path", cl::init(false), cl::Hidden, 336 cl::desc("Enable VPlan-native vectorization path with " 337 "support for outer loop vectorization.")); 338 339 // FIXME: Remove this switch once we have divergence analysis. Currently we 340 // assume divergent non-backedge branches when this switch is true. 341 cl::opt<bool> EnableVPlanPredication( 342 "enable-vplan-predication", cl::init(false), cl::Hidden, 343 cl::desc("Enable VPlan-native vectorization path predicator with " 344 "support for outer loop vectorization.")); 345 346 // This flag enables the stress testing of the VPlan H-CFG construction in the 347 // VPlan-native vectorization path. 

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
        BFI(BFI), PSI(PSI) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;
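
  // For example, with UF = 2 and VF = 4, an i32 value from the original loop
  // is represented in the vector loop by two <4 x i32> values, one per
  // unrolled part.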

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           Value *StartV, unsigned UF, ElementCount VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a scalar instance for the lane and unroll part given by
  /// \p Instance. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence.
  /// If the value has been neither vectorized nor scalarized, it must be loop
  /// invariant, so we simply broadcast the value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
  }

  void setScalarValue(Value *Scalar, const VPIteration &Instance, Value *V) {
    VectorLoopValueMap.setScalarValue(Scalar, Instance, V);
  }

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);
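
  // For example, with Val = <4 x i32> %v, StartIdx = 0 and Step = %s, the
  // result is conceptually %v + <0, %s, 2 * %s, 3 * %s>, yielding one
  // induction value per SIMD lane.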

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast.
  /// We have already proven that the casted Phi is equal to the uncasted Phi
  /// in the vectorized loop (under a runtime guard), and therefore there is no
  /// need to vectorize the cast - the same value can be used in the vector
  /// loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and
  /// createVectorIntOrFpInductionPHI()). In the latter case \p EntryVal is a
  /// TruncInst and we must not record anything for that IV, but it's
  /// error-prone to expect callers of this routine to care about that, hence
  /// this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar loop.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};
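
// Roughly, the skeleton produced by the two strategies below has the
// following shape (a sketch; the precise block layout is determined by the
// implementations of createEpilogueVectorizedLoopSkeleton()):
//
//   main-loop iteration count check --(too few)--> epilogue count check
//   SCEV and memory runtime checks
//   main vector loop
//   epilogue iteration count check --(too few)--> scalar preheader
//   epilogue vector loop
//   scalar remainder loop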

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationLegality *LVL,
                                 llvm::LoopVectorizationCostModel *CM,
                                 BlockFrequencyInfo *BFI,
                                 ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
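
// For example, with Step = 2 and a fixed VF of 4 this returns the constant 8,
// while for a scalable VF with a known minimum of 4 elements it returns
// 8 * vscale (materialized through a vscale intrinsic call).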

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Set up cost-based decisions for the user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. The resulting decision map is used for
  /// building the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usage of the loop for the
  /// given vectorization factors.
1305 SmallVector<RegisterUsage, 8> 1306 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1307 1308 /// Collect values we want to ignore in the cost model. 1309 void collectValuesToIgnore(); 1310 1311 /// Split reductions into those that happen in the loop, and those that happen 1312 /// outside. In-loop reductions are collected into InLoopReductionChains. 1313 void collectInLoopReductions(); 1314 1315 /// \returns The smallest bitwidth each instruction can be represented with. 1316 /// The vector equivalents of these instructions should be truncated to this 1317 /// type. 1318 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1319 return MinBWs; 1320 } 1321 1322 /// \returns True if it is more profitable to scalarize instruction \p I for 1323 /// vectorization factor \p VF. 1324 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1325 assert(VF.isVector() && 1326 "Scalarization profitability is relevant only for VF > 1."); 1327 1328 // Cost model is not run in the VPlan-native path - return conservative 1329 // result until this changes. 1330 if (EnableVPlanNativePath) 1331 return false; 1332 1333 auto Scalars = InstsToScalarize.find(VF); 1334 assert(Scalars != InstsToScalarize.end() && 1335 "VF not yet analyzed for scalarization profitability"); 1336 return Scalars->second.find(I) != Scalars->second.end(); 1337 } 1338 1339 /// Returns true if \p I is known to be uniform after vectorization. 1340 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1341 if (VF.isScalar()) 1342 return true; 1343 1344 // Cost model is not run in the VPlan-native path - return conservative 1345 // result until this changes. 1346 if (EnableVPlanNativePath) 1347 return false; 1348 1349 auto UniformsPerVF = Uniforms.find(VF); 1350 assert(UniformsPerVF != Uniforms.end() && 1351 "VF not yet analyzed for uniformity"); 1352 return UniformsPerVF->second.count(I); 1353 } 1354 1355 /// Returns true if \p I is known to be scalar after vectorization. 1356 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1357 if (VF.isScalar()) 1358 return true; 1359 1360 // Cost model is not run in the VPlan-native path - return conservative 1361 // result until this changes. 1362 if (EnableVPlanNativePath) 1363 return false; 1364 1365 auto ScalarsPerVF = Scalars.find(VF); 1366 assert(ScalarsPerVF != Scalars.end() && 1367 "Scalar values are not calculated for VF"); 1368 return ScalarsPerVF->second.count(I); 1369 } 1370 1371 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1372 /// for vectorization factor \p VF. 1373 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1374 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1375 !isProfitableToScalarize(I, VF) && 1376 !isScalarAfterVectorization(I, VF); 1377 } 1378 1379 /// Decision that was taken during cost calculation for memory instruction. 1380 enum InstWidening { 1381 CM_Unknown, 1382 CM_Widen, // For consecutive accesses with stride +1. 1383 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1384 CM_Interleave, 1385 CM_GatherScatter, 1386 CM_Scalarize 1387 }; 1388 1389 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1390 /// instruction \p I and vector width \p VF.
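/// For example (illustrative), a unit-stride load is typically recorded as
/// (CM_Widen, Cost), a stride of -1 as CM_Widen_Reverse, and a random-access
/// pattern as CM_GatherScatter or CM_Scalarize.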
1391 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1392 InstructionCost Cost) { 1393 assert(VF.isVector() && "Expected VF >=2"); 1394 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1395 } 1396 1397 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1398 /// interleaving group \p Grp and vector width \p VF. 1399 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1400 ElementCount VF, InstWidening W, 1401 InstructionCost Cost) { 1402 assert(VF.isVector() && "Expected VF >=2"); 1403 // Broadcast this decision to all instructions inside the group; 1404 // the cost will be assigned to one instruction only. 1405 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1406 if (auto *I = Grp->getMember(i)) { 1407 if (Grp->getInsertPos() == I) 1408 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1409 else 1410 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1411 } 1412 } 1413 } 1414 1415 /// Return the cost model decision for the given instruction \p I and vector 1416 /// width \p VF. Return CM_Unknown if this instruction did not pass 1417 /// through the cost modeling. 1418 InstWidening getWideningDecision(Instruction *I, ElementCount VF) { 1419 assert(VF.isVector() && "Expected VF to be a vector VF"); 1420 // Cost model is not run in the VPlan-native path - return conservative 1421 // result until this changes. 1422 if (EnableVPlanNativePath) 1423 return CM_GatherScatter; 1424 1425 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1426 auto Itr = WideningDecisions.find(InstOnVF); 1427 if (Itr == WideningDecisions.end()) 1428 return CM_Unknown; 1429 return Itr->second.first; 1430 } 1431 1432 /// Return the vectorization cost for the given instruction \p I and vector 1433 /// width \p VF. 1434 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1435 assert(VF.isVector() && "Expected VF >=2"); 1436 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1437 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1438 "The cost is not calculated"); 1439 return WideningDecisions[InstOnVF].second; 1440 } 1441 1442 /// Return True if instruction \p I is an optimizable truncate whose operand 1443 /// is an induction variable. Such a truncate will be removed by adding a new 1444 /// induction variable with the destination type. 1445 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1446 // If the instruction is not a truncate, return false. 1447 auto *Trunc = dyn_cast<TruncInst>(I); 1448 if (!Trunc) 1449 return false; 1450 1451 // Get the source and destination types of the truncate. 1452 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1453 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1454 1455 // If the truncate is free for the given types, return false. Replacing a 1456 // free truncate with an induction variable would add an induction variable 1457 // update instruction to each iteration of the loop. We exclude from this 1458 // check the primary induction variable since it will need an update 1459 // instruction regardless. 1460 Value *Op = Trunc->getOperand(0); 1461 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1462 return false; 1463 1464 // If the truncated value is not an induction variable, return false.
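// For example (illustrative): for a non-free 'trunc i64 %iv to i32' of an
// induction phi %iv, we return true so that a new i32 induction variable can
// replace the truncate; the check below confirms %iv really is an induction.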
1465 return Legal->isInductionPhi(Op); 1466 } 1467 1468 /// Collects the instructions to scalarize for each predicated instruction in 1469 /// the loop. 1470 void collectInstsToScalarize(ElementCount VF); 1471 1472 /// Collect Uniform and Scalar values for the given \p VF. 1473 /// The sets depend on the CM decision for Load/Store instructions 1474 /// that may be vectorized as interleave, gather-scatter or scalarized. 1475 void collectUniformsAndScalars(ElementCount VF) { 1476 // Do the analysis once. 1477 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1478 return; 1479 setCostBasedWideningDecision(VF); 1480 collectLoopUniforms(VF); 1481 collectLoopScalars(VF); 1482 } 1483 1484 /// Returns true if the target machine supports masked store operation 1485 /// for the given \p DataType and kind of access to \p Ptr. 1486 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) { 1487 return Legal->isConsecutivePtr(Ptr) && 1488 TTI.isLegalMaskedStore(DataType, Alignment); 1489 } 1490 1491 /// Returns true if the target machine supports masked load operation 1492 /// for the given \p DataType and kind of access to \p Ptr. 1493 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) { 1494 return Legal->isConsecutivePtr(Ptr) && 1495 TTI.isLegalMaskedLoad(DataType, Alignment); 1496 } 1497 1498 /// Returns true if the target machine supports masked scatter operation 1499 /// for the given \p DataType. 1500 bool isLegalMaskedScatter(Type *DataType, Align Alignment) { 1501 return TTI.isLegalMaskedScatter(DataType, Alignment); 1502 } 1503 1504 /// Returns true if the target machine supports masked gather operation 1505 /// for the given \p DataType. 1506 bool isLegalMaskedGather(Type *DataType, Align Alignment) { 1507 return TTI.isLegalMaskedGather(DataType, Alignment); 1508 } 1509 1510 /// Returns true if the target machine can represent \p V as a masked gather 1511 /// or scatter operation. 1512 bool isLegalGatherOrScatter(Value *V) { 1513 bool LI = isa<LoadInst>(V); 1514 bool SI = isa<StoreInst>(V); 1515 if (!LI && !SI) 1516 return false; 1517 auto *Ty = getMemInstValueType(V); 1518 Align Align = getLoadStoreAlignment(V); 1519 return (LI && isLegalMaskedGather(Ty, Align)) || 1520 (SI && isLegalMaskedScatter(Ty, Align)); 1521 } 1522 1523 /// Returns true if \p I is an instruction that will be scalarized with 1524 /// predication. Such instructions include conditional stores and 1525 /// instructions that may divide by zero. 1526 /// If a non-zero VF has been calculated, we check if I will be scalarized 1527 /// with predication for that VF. 1528 bool isScalarWithPredication(Instruction *I, 1529 ElementCount VF = ElementCount::getFixed(1)); 1530 1531 // Returns true if \p I is an instruction that will be predicated either 1532 // through scalar predication or masked load/store or masked gather/scatter. 1533 // Superset of instructions that return true for isScalarWithPredication. 1534 bool isPredicatedInst(Instruction *I) { 1535 if (!blockNeedsPredication(I->getParent())) 1536 return false; 1537 // Loads and stores that need some form of masked operation are predicated 1538 // instructions. 1539 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1540 return Legal->isMaskRequired(I); 1541 return isScalarWithPredication(I); 1542 } 1543 1544 /// Returns true if \p I is a memory instruction with consecutive memory 1545 /// access that can be widened.
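/// For example, a load of A[i] with stride +1 can become a single wide load
/// of VF consecutive elements, whereas a load of A[2*i] cannot and would
/// need a gather or scalarization instead.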
1546 bool 1547 memoryInstructionCanBeWidened(Instruction *I, 1548 ElementCount VF = ElementCount::getFixed(1)); 1549 1550 /// Returns true if \p I is a memory instruction in an interleaved-group 1551 /// of memory accesses that can be vectorized with wide vector loads/stores 1552 /// and shuffles. 1553 bool 1554 interleavedAccessCanBeWidened(Instruction *I, 1555 ElementCount VF = ElementCount::getFixed(1)); 1556 1557 /// Check if \p Instr belongs to any interleaved access group. 1558 bool isAccessInterleaved(Instruction *Instr) { 1559 return InterleaveInfo.isInterleaved(Instr); 1560 } 1561 1562 /// Get the interleaved access group that \p Instr belongs to. 1563 const InterleaveGroup<Instruction> * 1564 getInterleavedAccessGroup(Instruction *Instr) { 1565 return InterleaveInfo.getInterleaveGroup(Instr); 1566 } 1567 1568 /// Returns true if we're required to use a scalar epilogue for at least 1569 /// the final iteration of the original loop. 1570 bool requiresScalarEpilogue() const { 1571 if (!isScalarEpilogueAllowed()) 1572 return false; 1573 // If we might exit from anywhere but the latch, we must run the exiting 1574 // iteration in scalar form. 1575 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1576 return true; 1577 return InterleaveInfo.requiresScalarEpilogue(); 1578 } 1579 1580 /// Returns true if a scalar epilogue is allowed, i.e. it has not been 1581 /// disallowed due to optsize or a loop hint annotation. 1582 bool isScalarEpilogueAllowed() const { 1583 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1584 } 1585 1586 /// Returns true if all loop blocks should be masked to fold the tail of the 1587 /// loop. bool foldTailByMasking() const { return FoldTailByMasking; } 1588 1589 bool blockNeedsPredication(BasicBlock *BB) { 1590 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1591 } 1592 1593 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1594 /// nodes to the chain of instructions representing the reductions. Uses a 1595 /// MapVector to ensure deterministic iteration order. 1596 using ReductionChainMap = 1597 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1598 1599 /// Return the chain of instructions representing an inloop reduction. 1600 const ReductionChainMap &getInLoopReductionChains() const { 1601 return InLoopReductionChains; 1602 } 1603 1604 /// Returns true if the Phi is part of an inloop reduction. 1605 bool isInLoopReduction(PHINode *Phi) const { 1606 return InLoopReductionChains.count(Phi); 1607 } 1608 1609 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1610 /// with factor VF. Return the cost of the instruction, including 1611 /// scalarization overhead if it's needed. 1612 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF); 1613 1614 /// Estimate cost of a call instruction CI if it were vectorized with factor 1615 /// VF. Return the cost of the instruction, including scalarization overhead 1616 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1617 /// scalarized - 1618 /// i.e., either a vector version isn't available or it is too expensive. 1619 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1620 bool &NeedToScalarize); 1621 1622 /// Invalidates decisions already taken by the cost model.
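/// This can be needed, e.g., when an earlier per-VF analysis must be rerun
/// under changed assumptions, such as a revised tail-folding decision.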
1623 void invalidateCostModelingDecisions() { 1624 WideningDecisions.clear(); 1625 Uniforms.clear(); 1626 Scalars.clear(); 1627 } 1628 1629 private: 1630 unsigned NumPredStores = 0; 1631 1632 /// \return An upper bound for the vectorization factor, a power-of-2 larger 1633 /// than zero. One is returned if vectorization should best be avoided due 1634 /// to cost. 1635 ElementCount computeFeasibleMaxVF(unsigned ConstTripCount, 1636 ElementCount UserVF); 1637 1638 /// The vectorization cost is a combination of the cost itself and a boolean 1639 /// indicating whether any of the contributing operations will actually 1640 /// operate on vector values after type legalization in the backend. If this 1641 /// latter value is false, then all operations will be scalarized (i.e. no 1642 /// vectorization has 1643 /// actually taken 1644 /// place). 1645 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1646 1647 /// Returns the expected execution cost. The unit of the cost does 1648 /// not matter because we use the 'cost' units to compare different 1649 /// vector widths. The cost that is returned is *not* normalized by 1650 /// the factor width. 1651 VectorizationCostTy expectedCost(ElementCount VF); 1652 1653 /// Returns the execution time cost of an instruction for a given vector 1654 /// width. Vector width of one means scalar. 1655 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1656 1657 /// The cost-computation logic from getInstructionCost which provides 1658 /// the vector type as an output parameter. 1659 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1660 Type *&VectorTy); 1661 1662 /// Return the cost of instructions in an inloop reduction pattern, if I is 1663 /// part of that pattern. 1664 InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF, 1665 Type *VectorTy, 1666 TTI::TargetCostKind CostKind); 1667 1668 /// Calculate vectorization cost of memory instruction \p I. 1669 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1670 1671 /// The cost computation for scalarized memory instruction. 1672 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1673 1674 /// The cost computation for interleaving group of memory instructions. 1675 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1676 1677 /// The cost computation for Gather/Scatter instruction. 1678 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1679 1680 /// The cost computation for widening instruction \p I with consecutive 1681 /// memory access. 1682 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1683 1684 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1685 /// Load: scalar load + broadcast. 1686 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1687 /// element) 1688 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1689 1690 /// Estimate the overhead of scalarizing an instruction. This is a 1691 /// convenience wrapper for the type-based getScalarizationOverhead API. 1692 InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF); 1693 1694 /// Returns whether the instruction is a load or store and will be emitted 1695 /// as a vector operation. 1696 bool isConsecutiveLoadOrStore(Instruction *I); 1697 1698 /// Returns true if an artificially high cost for emulated masked memrefs 1699 /// should be used.
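/// Such accesses are emulated by a per-lane compare-and-branch around a
/// scalar memory op, an expansion whose true cost plain scalarization
/// costing tends to underestimate (hence the hack).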
1700 bool useEmulatedMaskMemRefHack(Instruction *I); 1701 1702 /// Map of scalar integer values to the smallest bitwidth they can be legally 1703 /// represented as. The vector equivalents of these values should be truncated 1704 /// to this type. 1705 MapVector<Instruction *, uint64_t> MinBWs; 1706 1707 /// A type representing the costs for instructions if they were to be 1708 /// scalarized rather than vectorized. The entries are Instruction-Cost 1709 /// pairs. 1710 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; 1711 1712 /// A set containing all BasicBlocks that are known to be present after 1713 /// vectorization as predicated blocks. 1714 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1715 1716 /// Records whether it is allowed to have the original scalar loop execute at 1717 /// least once. This may be needed as a fallback loop in case runtime 1718 /// aliasing/dependence checks fail, or to handle the tail/remainder 1719 /// iterations when the trip count is unknown or doesn't divide by the VF, 1720 /// or as a peel-loop to handle gaps in interleave-groups. 1721 /// Under optsize and when the trip count is very small we don't allow any 1722 /// iterations to execute in the scalar loop. 1723 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1724 1725 /// All blocks of the loop are to be masked to fold the tail of the scalar 1726 /// iterations. bool FoldTailByMasking = false; 1727 1728 /// A map holding scalar costs for different vectorization factors. The 1729 /// presence of a cost for an instruction in the mapping indicates that the 1730 /// instruction will be scalarized when vectorizing with the associated 1731 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1732 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1733 1734 /// Holds the instructions known to be uniform after vectorization. 1735 /// The data is collected per VF. 1736 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1737 1738 /// Holds the instructions known to be scalar after vectorization. 1739 /// The data is collected per VF. 1740 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1741 1742 /// Holds the instructions (address computations) that are forced to be 1743 /// scalarized. 1744 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1745 1746 /// PHINodes of the reductions that should be expanded in-loop along with 1747 /// their associated chains of reduction operations, in program order from top 1748 /// (PHI) to bottom. 1749 ReductionChainMap InLoopReductionChains; 1750 1751 /// A Map of inloop reduction operations and their immediate chain operand. 1752 /// FIXME: This can be removed once reductions can be costed correctly in 1753 /// vplan. This was added to allow quick lookup to the inloop operations, 1754 /// without having to loop through InLoopReductionChains. 1755 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1756 1757 /// Returns the expected difference in cost from scalarizing the expression 1758 /// feeding a predicated instruction \p PredInst. The instructions to 1759 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1760 /// non-negative return value implies the expression will be scalarized. 1761 /// Currently, only single-use chains are considered for scalarization.
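/// For example (illustrative): if a predicated store is scalarized anyway,
/// also scalarizing its single-use address computation avoids a
/// vector-to-scalar extract, which can make the discount non-negative.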
1762 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1763 ElementCount VF); 1764 1765 /// Collect the instructions that are uniform after vectorization. An 1766 /// instruction is uniform if we represent it with a single scalar value in 1767 /// the vectorized loop corresponding to each vector iteration. Examples of 1768 /// uniform instructions include pointer operands of consecutive or 1769 /// interleaved memory accesses. Note that although uniformity implies an 1770 /// instruction will be scalar, the reverse is not true. In general, a 1771 /// scalarized instruction will be represented by VF scalar values in the 1772 /// vectorized loop, each corresponding to an iteration of the original 1773 /// scalar loop. 1774 void collectLoopUniforms(ElementCount VF); 1775 1776 /// Collect the instructions that are scalar after vectorization. An 1777 /// instruction is scalar if it is known to be uniform or will be scalarized 1778 /// during vectorization. Non-uniform scalarized instructions will be 1779 /// represented by VF values in the vectorized loop, each corresponding to an 1780 /// iteration of the original scalar loop. 1781 void collectLoopScalars(ElementCount VF); 1782 1783 /// Keeps cost model vectorization decision and cost for instructions. 1784 /// Right now it is used for memory instructions only. 1785 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1786 std::pair<InstWidening, InstructionCost>>; 1787 1788 DecisionList WideningDecisions; 1789 1790 /// Returns true if \p V is expected to be vectorized and it needs to be 1791 /// extracted. 1792 bool needsExtract(Value *V, ElementCount VF) const { 1793 Instruction *I = dyn_cast<Instruction>(V); 1794 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1795 TheLoop->isLoopInvariant(I)) 1796 return false; 1797 1798 // Assume we can vectorize V (and hence we need extraction) if the 1799 // scalars are not computed yet. This can happen, because it is called 1800 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1801 // the scalars are collected. That should be a safe assumption in most 1802 // cases, because we check if the operands have vectorizable types 1803 // beforehand in LoopVectorizationLegality. 1804 return Scalars.find(VF) == Scalars.end() || 1805 !isScalarAfterVectorization(I, VF); 1806 }; 1807 1808 /// Returns a range containing only operands needing to be extracted. 1809 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1810 ElementCount VF) { 1811 return SmallVector<Value *, 4>(make_filter_range( 1812 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1813 } 1814 1815 /// Determines if we have the infrastructure to vectorize loop \p L and its 1816 /// epilogue, assuming the main loop is vectorized by \p VF. 1817 bool isCandidateForEpilogueVectorization(const Loop &L, 1818 const ElementCount VF) const; 1819 1820 /// Returns true if epilogue vectorization is considered profitable, and 1821 /// false otherwise. 1822 /// \p VF is the vectorization factor chosen for the original loop. 1823 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1824 1825 public: 1826 /// The loop that we evaluate. 1827 Loop *TheLoop; 1828 1829 /// Predicated scalar evolution analysis. 1830 PredicatedScalarEvolution &PSE; 1831 1832 /// Loop Info analysis. 1833 LoopInfo *LI; 1834 1835 /// Vectorization legality. 1836 LoopVectorizationLegality *Legal; 1837 1838 /// Vector target information. 
1839 const TargetTransformInfo &TTI; 1840 1841 /// Target Library Info. 1842 const TargetLibraryInfo *TLI; 1843 1844 /// Demanded bits analysis. 1845 DemandedBits *DB; 1846 1847 /// Assumption cache. 1848 AssumptionCache *AC; 1849 1850 /// Interface to emit optimization remarks. 1851 OptimizationRemarkEmitter *ORE; 1852 1853 const Function *TheFunction; 1854 1855 /// Loop Vectorize Hint. 1856 const LoopVectorizeHints *Hints; 1857 1858 /// The interleaved access information contains groups of interleaved accesses 1859 /// with the same stride that are close to each other. 1860 InterleavedAccessInfo &InterleaveInfo; 1861 1862 /// Values to ignore in the cost model. 1863 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1864 1865 /// Values to ignore in the cost model when VF > 1. 1866 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1867 1868 /// Profitable vector factors. 1869 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1870 }; 1871 1872 } // end namespace llvm 1873 1874 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1875 // vectorization. The loop needs to be annotated with #pragma omp simd 1876 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1877 // vector length information is not provided, vectorization is not considered 1878 // explicit. Interleave hints are not allowed either. These limitations will be 1879 // relaxed in the future. 1880 // Please note that we are currently forced to abuse the pragma 'clang 1881 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1882 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1883 // provides *explicit vectorization hints* (LV can bypass legal checks and 1884 // assume that vectorization is legal). However, both hints are implemented 1885 // using the same metadata (llvm.loop.vectorize, processed by 1886 // LoopVectorizeHints). This will be fixed in the future when the native IR 1887 // representation for pragma 'omp simd' is introduced. 1888 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1889 OptimizationRemarkEmitter *ORE) { 1890 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 1891 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1892 1893 // Only outer loops with an explicit vectorization hint are supported. 1894 // Unannotated outer loops are ignored. 1895 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1896 return false; 1897 1898 Function *Fn = OuterLp->getHeader()->getParent(); 1899 if (!Hints.allowVectorization(Fn, OuterLp, 1900 true /*VectorizeOnlyWhenForced*/)) { 1901 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1902 return false; 1903 } 1904 1905 if (Hints.getInterleave() > 1) { 1906 // TODO: Interleave support is future work. 1907 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1908 "outer loops.\n"); 1909 Hints.emitRemarkWithHints(); 1910 return false; 1911 } 1912 1913 return true; 1914 } 1915 1916 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1917 OptimizationRemarkEmitter *ORE, 1918 SmallVectorImpl<Loop *> &V) { 1919 // Collect inner loops and outer loops without irreducible control flow. For 1920 // now, only collect outer loops that have explicit vectorization hints. If we 1921 // are stress testing the VPlan H-CFG construction, we collect the outermost 1922 // loop of every loop nest.
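// For example, in a loop nest L1 { L2 }, only the innermost loop L2 is
// collected by default; L1 is collected instead when it carries explicit
// vectorization hints under the VPlan-native path, or when stress testing.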
1923 if (L.isInnermost() || VPlanBuildStressTest || 1924 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1925 LoopBlocksRPO RPOT(&L); 1926 RPOT.perform(LI); 1927 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1928 V.push_back(&L); 1929 // TODO: Collect inner loops inside marked outer loops in case 1930 // vectorization fails for the outer loop. Do not invoke 1931 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1932 // already known to be reducible. We can use an inherited attribute for 1933 // that. 1934 return; 1935 } 1936 } 1937 for (Loop *InnerL : L) 1938 collectSupportedLoops(*InnerL, LI, ORE, V); 1939 } 1940 1941 namespace { 1942 1943 /// The LoopVectorize Pass. 1944 struct LoopVectorize : public FunctionPass { 1945 /// Pass identification, replacement for typeid 1946 static char ID; 1947 1948 LoopVectorizePass Impl; 1949 1950 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1951 bool VectorizeOnlyWhenForced = false) 1952 : FunctionPass(ID), 1953 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1954 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1955 } 1956 1957 bool runOnFunction(Function &F) override { 1958 if (skipFunction(F)) 1959 return false; 1960 1961 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1962 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1963 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1964 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1965 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1966 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1967 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1968 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1969 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1970 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1971 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1972 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1973 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1974 1975 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1976 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1977 1978 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1979 GetLAA, *ORE, PSI).MadeAnyChange; 1980 } 1981 1982 void getAnalysisUsage(AnalysisUsage &AU) const override { 1983 AU.addRequired<AssumptionCacheTracker>(); 1984 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1985 AU.addRequired<DominatorTreeWrapperPass>(); 1986 AU.addRequired<LoopInfoWrapperPass>(); 1987 AU.addRequired<ScalarEvolutionWrapperPass>(); 1988 AU.addRequired<TargetTransformInfoWrapperPass>(); 1989 AU.addRequired<AAResultsWrapperPass>(); 1990 AU.addRequired<LoopAccessLegacyAnalysis>(); 1991 AU.addRequired<DemandedBitsWrapperPass>(); 1992 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1993 AU.addRequired<InjectTLIMappingsLegacy>(); 1994 1995 // We currently do not preserve loopinfo/dominator analyses with outer loop 1996 // vectorization. Until this is addressed, mark these analyses as preserved 1997 // only for non-VPlan-native path. 1998 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1999 if (!EnableVPlanNativePath) { 2000 AU.addPreserved<LoopInfoWrapperPass>(); 2001 AU.addPreserved<DominatorTreeWrapperPass>(); 2002 } 2003 2004 AU.addPreserved<BasicAAWrapperPass>(); 2005 AU.addPreserved<GlobalsAAWrapperPass>(); 2006 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2007 } 2008 }; 2009 2010 } // end anonymous namespace 2011 2012 //===----------------------------------------------------------------------===// 2013 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2014 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2015 //===----------------------------------------------------------------------===// 2016 2017 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2018 // We need to place the broadcast of invariant variables outside the loop, 2019 // but only if it's proven safe to do so. Else, broadcast will be inside 2020 // vector loop body. 2021 Instruction *Instr = dyn_cast<Instruction>(V); 2022 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2023 (!Instr || 2024 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2025 // Place the code for broadcasting invariant variables in the new preheader. 2026 IRBuilder<>::InsertPointGuard Guard(Builder); 2027 if (SafeToHoist) 2028 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2029 2030 // Broadcast the scalar into all locations in the vector. 2031 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2032 2033 return Shuf; 2034 } 2035 2036 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2037 const InductionDescriptor &II, Value *Step, Value *Start, 2038 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2039 VPTransformState &State) { 2040 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2041 "Expected either an induction phi-node or a truncate of it!"); 2042 2043 // Construct the initial value of the vector IV in the vector loop preheader 2044 auto CurrIP = Builder.saveIP(); 2045 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2046 if (isa<TruncInst>(EntryVal)) { 2047 assert(Start->getType()->isIntegerTy() && 2048 "Truncation requires an integer type"); 2049 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2050 Step = Builder.CreateTrunc(Step, TruncType); 2051 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2052 } 2053 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2054 Value *SteppedStart = 2055 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2056 2057 // We create vector phi nodes for both integer and floating-point induction 2058 // variables. Here, we determine the kind of arithmetic we will perform. 2059 Instruction::BinaryOps AddOp; 2060 Instruction::BinaryOps MulOp; 2061 if (Step->getType()->isIntegerTy()) { 2062 AddOp = Instruction::Add; 2063 MulOp = Instruction::Mul; 2064 } else { 2065 AddOp = II.getInductionOpcode(); 2066 MulOp = Instruction::FMul; 2067 } 2068 2069 // Multiply the vectorization factor by the step using integer or 2070 // floating-point arithmetic as appropriate. 2071 Value *ConstVF = 2072 getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue()); 2073 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2074 2075 // Create a vector splat to use in the induction update. 2076 // 2077 // FIXME: If the step is non-constant, we create the vector splat with 2078 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2079 // handle a constant vector splat. 
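// For example (illustrative): with VF = 4 and Step = 2, Mul below is 8 and
// SplatVF is <8, 8, 8, 8>, so each "step.add" advances the vector IV by
// VF * Step = 8 per unroll part.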
2080 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2081 Value *SplatVF = isa<Constant>(Mul) 2082 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2083 : Builder.CreateVectorSplat(VF, Mul); 2084 Builder.restoreIP(CurrIP); 2085 2086 // We may need to add the step a number of times, depending on the unroll 2087 // factor. The last of those goes into the PHI. 2088 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2089 &*LoopVectorBody->getFirstInsertionPt()); 2090 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2091 Instruction *LastInduction = VecInd; 2092 for (unsigned Part = 0; Part < UF; ++Part) { 2093 State.set(Def, EntryVal, LastInduction, Part); 2094 2095 if (isa<TruncInst>(EntryVal)) 2096 addMetadata(LastInduction, EntryVal); 2097 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2098 State, Part); 2099 2100 LastInduction = cast<Instruction>(addFastMathFlag( 2101 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 2102 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2103 } 2104 2105 // Move the last step to the end of the latch block. This ensures consistent 2106 // placement of all induction updates. 2107 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2108 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2109 auto *ICmp = cast<Instruction>(Br->getCondition()); 2110 LastInduction->moveBefore(ICmp); 2111 LastInduction->setName("vec.ind.next"); 2112 2113 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2114 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2115 } 2116 2117 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2118 return Cost->isScalarAfterVectorization(I, VF) || 2119 Cost->isProfitableToScalarize(I, VF); 2120 } 2121 2122 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2123 if (shouldScalarizeInstruction(IV)) 2124 return true; 2125 auto isScalarInst = [&](User *U) -> bool { 2126 auto *I = cast<Instruction>(U); 2127 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2128 }; 2129 return llvm::any_of(IV->users(), isScalarInst); 2130 } 2131 2132 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2133 const InductionDescriptor &ID, const Instruction *EntryVal, 2134 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2135 unsigned Part, unsigned Lane) { 2136 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2137 "Expected either an induction phi-node or a truncate of it!"); 2138 2139 // This induction variable is not the phi from the original loop but the 2140 // newly-created IV, based on the proof that the cast Phi is equal to the 2141 // uncast Phi in the vectorized loop (possibly under a runtime guard). It 2142 // reuses the same InductionDescriptor that the original IV uses, but we don't 2143 // have to do any recording in this case - that is done when the original IV 2144 // is processed. 2145 if (isa<TruncInst>(EntryVal)) 2146 return; 2147 2148 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2149 if (Casts.empty()) 2150 return; 2151 // Only the first Cast instruction in the Casts vector is of interest. 2152 // The rest of the Casts (if they exist) have no uses outside the 2153 // induction update chain itself.
2154 if (Lane < UINT_MAX) 2155 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2156 else 2157 State.set(CastDef, VectorLoopVal, Part); 2158 } 2159 2160 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2161 TruncInst *Trunc, VPValue *Def, 2162 VPValue *CastDef, 2163 VPTransformState &State) { 2164 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2165 "Primary induction variable must have an integer type"); 2166 2167 auto II = Legal->getInductionVars().find(IV); 2168 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2169 2170 auto ID = II->second; 2171 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2172 2173 // The value from the original loop to which we are mapping the new induction 2174 // variable. 2175 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2176 2177 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2178 2179 // Generate code for the induction step. Note that induction steps are 2180 // required to be loop-invariant 2181 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2182 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2183 "Induction step should be loop invariant"); 2184 if (PSE.getSE()->isSCEVable(IV->getType())) { 2185 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2186 return Exp.expandCodeFor(Step, Step->getType(), 2187 LoopVectorPreHeader->getTerminator()); 2188 } 2189 return cast<SCEVUnknown>(Step)->getValue(); 2190 }; 2191 2192 // The scalar value to broadcast. This is derived from the canonical 2193 // induction variable. If a truncation type is given, truncate the canonical 2194 // induction variable and step. Otherwise, derive these values from the 2195 // induction descriptor. 2196 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2197 Value *ScalarIV = Induction; 2198 if (IV != OldInduction) { 2199 ScalarIV = IV->getType()->isIntegerTy() 2200 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2201 : Builder.CreateCast(Instruction::SIToFP, Induction, 2202 IV->getType()); 2203 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2204 ScalarIV->setName("offset.idx"); 2205 } 2206 if (Trunc) { 2207 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2208 assert(Step->getType()->isIntegerTy() && 2209 "Truncation requires an integer step"); 2210 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2211 Step = Builder.CreateTrunc(Step, TruncType); 2212 } 2213 return ScalarIV; 2214 }; 2215 2216 // Create the vector values from the scalar IV, in the absence of creating a 2217 // vector IV. 2218 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2219 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2220 for (unsigned Part = 0; Part < UF; ++Part) { 2221 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2222 Value *EntryPart = 2223 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2224 ID.getInductionOpcode()); 2225 State.set(Def, EntryVal, EntryPart, Part); 2226 if (Trunc) 2227 addMetadata(EntryPart, Trunc); 2228 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2229 State, Part); 2230 } 2231 }; 2232 2233 // Now do the actual transformations, and start with creating the step value. 
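// Four cases follow: a scalar (or zero) VF splats the scalar IV; an IV with
// no scalar users becomes a vector IV phi; an IV with some scalar users gets
// a vector IV phi plus scalar steps; an IV whose users are all scalar gets
// scalar steps only, plus a splat IV when tail-folding, since the splat then
// feeds the predicate of the masked loads/stores.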
2234 Value *Step = CreateStepValue(ID.getStep()); 2235 if (VF.isZero() || VF.isScalar()) { 2236 Value *ScalarIV = CreateScalarIV(Step); 2237 CreateSplatIV(ScalarIV, Step); 2238 return; 2239 } 2240 2241 // Determine if we want a scalar version of the induction variable. This is 2242 // true if the induction variable itself is not widened, or if it has at 2243 // least one user in the loop that is not widened. 2244 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2245 if (!NeedsScalarIV) { 2246 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2247 State); 2248 return; 2249 } 2250 2251 // Try to create a new independent vector induction variable. If we can't 2252 // create the phi node, we will splat the scalar induction variable in each 2253 // loop iteration. 2254 if (!shouldScalarizeInstruction(EntryVal)) { 2255 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2256 State); 2257 Value *ScalarIV = CreateScalarIV(Step); 2258 // Create scalar steps that can be used by instructions we will later 2259 // scalarize. Note that the addition of the scalar steps will not increase 2260 // the number of instructions in the loop in the common case prior to 2261 // InstCombine. We will be trading one vector extract for each scalar step. 2262 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2263 return; 2264 } 2265 2266 // All IV users are scalar instructions, so only emit a scalar IV, not a 2267 // vectorized IV. Except when we tail-fold; then the splat IV feeds the 2268 // predicate used by the masked loads/stores. 2269 Value *ScalarIV = CreateScalarIV(Step); 2270 if (!Cost->isScalarEpilogueAllowed()) 2271 CreateSplatIV(ScalarIV, Step); 2272 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2273 } 2274 2275 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2276 Instruction::BinaryOps BinOp) { 2277 // Create and check the types. 2278 auto *ValVTy = cast<FixedVectorType>(Val->getType()); 2279 int VLen = ValVTy->getNumElements(); 2280 2281 Type *STy = Val->getType()->getScalarType(); 2282 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2283 "Induction Step must be an integer or FP"); 2284 assert(Step->getType() == STy && "Step has wrong type"); 2285 2286 SmallVector<Constant *, 8> Indices; 2287 2288 if (STy->isIntegerTy()) { 2289 // Create a vector of consecutive numbers from StartIdx to StartIdx + VLen - 1. 2290 for (int i = 0; i < VLen; ++i) 2291 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2292 2293 // Add the consecutive indices to the vector value. 2294 Constant *Cv = ConstantVector::get(Indices); 2295 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2296 Step = Builder.CreateVectorSplat(VLen, Step); 2297 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2298 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2299 // which can be found from the original scalar operations. 2300 Step = Builder.CreateMul(Cv, Step); 2301 return Builder.CreateAdd(Val, Step, "induction"); 2302 } 2303 2304 // Floating point induction. 2305 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2306 "Binary Opcode should be specified for FP induction"); 2307 // Create a vector of consecutive numbers from StartIdx to StartIdx + VLen - 1. 2308 for (int i = 0; i < VLen; ++i) 2309 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2310 2311 // Add the consecutive indices to the vector value.
2312 Constant *Cv = ConstantVector::get(Indices); 2313 2314 Step = Builder.CreateVectorSplat(VLen, Step); 2315 2316 // Floating point operations had to be 'fast' to enable the induction. 2317 FastMathFlags Flags; 2318 Flags.setFast(); 2319 2320 Value *MulOp = Builder.CreateFMul(Cv, Step); 2321 if (isa<Instruction>(MulOp)) 2322 // Have to check, MulOp may be a constant 2323 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2324 2325 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2326 if (isa<Instruction>(BOp)) 2327 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2328 return BOp; 2329 } 2330 2331 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2332 Instruction *EntryVal, 2333 const InductionDescriptor &ID, 2334 VPValue *Def, VPValue *CastDef, 2335 VPTransformState &State) { 2336 // We shouldn't have to build scalar steps if we aren't vectorizing. 2337 assert(VF.isVector() && "VF should be greater than one"); 2338 // Get the value type and ensure it and the step have the same integer type. 2339 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2340 assert(ScalarIVTy == Step->getType() && 2341 "Val and Step should have the same type"); 2342 2343 // We build scalar steps for both integer and floating-point induction 2344 // variables. Here, we determine the kind of arithmetic we will perform. 2345 Instruction::BinaryOps AddOp; 2346 Instruction::BinaryOps MulOp; 2347 if (ScalarIVTy->isIntegerTy()) { 2348 AddOp = Instruction::Add; 2349 MulOp = Instruction::Mul; 2350 } else { 2351 AddOp = ID.getInductionOpcode(); 2352 MulOp = Instruction::FMul; 2353 } 2354 2355 // Determine the number of scalars we need to generate for each unroll 2356 // iteration. If EntryVal is uniform, we only need to generate the first 2357 // lane. Otherwise, we generate all VF values. 2358 unsigned Lanes = 2359 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) 2360 ? 1 2361 : VF.getKnownMinValue(); 2362 assert((!VF.isScalable() || Lanes == 1) && 2363 "Should never scalarize a scalable vector"); 2364 // Compute the scalar steps and save the results in VectorLoopValueMap. 2365 for (unsigned Part = 0; Part < UF; ++Part) { 2366 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2367 auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2368 ScalarIVTy->getScalarSizeInBits()); 2369 Value *StartIdx = 2370 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2371 if (ScalarIVTy->isFloatingPointTy()) 2372 StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy); 2373 StartIdx = addFastMathFlag(Builder.CreateBinOp( 2374 AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane))); 2375 // The step returned by `createStepForVF` is a runtime-evaluated value 2376 // when VF is scalable. Otherwise, it should be folded into a Constant. 
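// For example (illustrative): with fixed VF = 4, Part = 1 and Lane = 2,
// StartIdx is 1 * 4 + 2 = 6 and the value computed below is
// ScalarIV + 6 * Step.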
2377 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2378 "Expected StartIdx to be folded to a constant when VF is not " 2379 "scalable"); 2380 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2381 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2382 State.set(Def, Add, VPIteration(Part, Lane)); 2383 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2384 Part, Lane); 2385 } 2386 } 2387 } 2388 2389 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2390 assert(V != Induction && "The new induction variable should not be used."); 2391 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2392 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2393 2394 // If we have a stride that is replaced by one, do it here. Defer this for 2395 // the VPlan-native path until we start running Legal checks in that path. 2396 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2397 V = ConstantInt::get(V->getType(), 1); 2398 2399 // If we have a vector mapped to this value, return it. 2400 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2401 return VectorLoopValueMap.getVectorValue(V, Part); 2402 2403 // If the value has not been vectorized, check if it has been scalarized 2404 // instead. If it has been scalarized, and we actually need the value in 2405 // vector form, we will construct the vector values on demand. 2406 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2407 Value *ScalarValue = 2408 VectorLoopValueMap.getScalarValue(V, VPIteration(Part, 0)); 2409 2410 // If we've scalarized a value, that value should be an instruction. 2411 auto *I = cast<Instruction>(V); 2412 2413 // If we aren't vectorizing, we can just copy the scalar map values over to 2414 // the vector map. 2415 if (VF.isScalar()) { 2416 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2417 return ScalarValue; 2418 } 2419 2420 // Get the last scalar instruction we generated for V and Part. If the value 2421 // is known to be uniform after vectorization, this corresponds to lane zero 2422 // of the Part unroll iteration. Otherwise, the last instruction is the one 2423 // we created for the last vector lane of the Part unroll iteration. 2424 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) 2425 ? 0 2426 : VF.getKnownMinValue() - 1; 2427 assert((!VF.isScalable() || LastLane == 0) && 2428 "Scalable vectorization can't lead to any scalarized values."); 2429 auto *LastInst = cast<Instruction>( 2430 VectorLoopValueMap.getScalarValue(V, VPIteration(Part, LastLane))); 2431 2432 // Set the insert point after the last scalarized instruction. This ensures 2433 // the insertelement sequence will directly follow the scalar definitions. 2434 auto OldIP = Builder.saveIP(); 2435 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2436 Builder.SetInsertPoint(&*NewIP); 2437 2438 // However, if we are vectorizing, we need to construct the vector values. 2439 // If the value is known to be uniform after vectorization, we can just 2440 // broadcast the scalar value corresponding to lane zero for each unroll 2441 // iteration. Otherwise, we construct the vector values using insertelement 2442 // instructions. Since the resulting vectors are stored in 2443 // VectorLoopValueMap, we will only generate the insertelements once. 
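// For example (illustrative), packing a non-uniform scalarized i32 value at
// VF = 4 proceeds as:
//   %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
//   ...
//   %p3 = insertelement <4 x i32> %p2, i32 %s3, i32 3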
2444 Value *VectorValue = nullptr; 2445 if (Cost->isUniformAfterVectorization(I, VF)) { 2446 VectorValue = getBroadcastInstrs(ScalarValue); 2447 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2448 } else { 2449 // Initialize packing with insertelements to start from poison. 2450 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2451 Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF)); 2452 VectorLoopValueMap.setVectorValue(V, Part, Poison); 2453 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 2454 packScalarIntoVectorValue(V, VPIteration(Part, Lane)); 2455 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2456 } 2457 Builder.restoreIP(OldIP); 2458 return VectorValue; 2459 } 2460 2461 // If this scalar is unknown, assume that it is a constant or that it is 2462 // loop invariant. Broadcast V and save the value for future uses. 2463 Value *B = getBroadcastInstrs(V); 2464 VectorLoopValueMap.setVectorValue(V, Part, B); 2465 return B; 2466 } 2467 2468 Value * 2469 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2470 const VPIteration &Instance) { 2471 // If the value is not an instruction contained in the loop, it should 2472 // already be scalar. 2473 if (OrigLoop->isLoopInvariant(V)) 2474 return V; 2475 2476 assert((Instance.Lane == 0 || 2477 !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) && 2478 "Uniform values only have lane zero"); 2479 2480 // If the value from the original loop has not been vectorized, it is 2481 // represented by UF x VF scalar values in the new loop. Return the requested 2482 // scalar value. 2483 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2484 return VectorLoopValueMap.getScalarValue(V, Instance); 2485 2486 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2487 // for the given unroll part. If this entry is not a vector type (i.e., the 2488 // vectorization factor is one), there is no need to generate an 2489 // extractelement instruction. 2490 auto *U = getOrCreateVectorValue(V, Instance.Part); 2491 if (!U->getType()->isVectorTy()) { 2492 assert(VF.isScalar() && "Value not scalarized has non-vector type"); 2493 return U; 2494 } 2495 2496 // Otherwise, the value from the original loop has been vectorized and is 2497 // represented by UF vector values. Extract and return the requested scalar 2498 // value from the appropriate vector lane.
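// For example (illustrative), requesting lane 2 of a <4 x i32> part emits:
//   %s = extractelement <4 x i32> %part, i32 2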
2499 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2500 } 2501 2502 void InnerLoopVectorizer::packScalarIntoVectorValue( 2503 Value *V, const VPIteration &Instance) { 2504 assert(V != Induction && "The new induction variable should not be used."); 2505 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2506 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2507 2508 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2509 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2510 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2511 Builder.getInt32(Instance.Lane)); 2512 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2513 } 2514 2515 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2516 const VPIteration &Instance, 2517 VPTransformState &State) { 2518 Value *ScalarInst = State.get(Def, Instance); 2519 Value *VectorValue = State.get(Def, Instance.Part); 2520 VectorValue = Builder.CreateInsertElement( 2521 VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane)); 2522 State.set(Def, VectorValue, Instance.Part); 2523 } 2524 2525 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2526 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2527 assert(!VF.isScalable() && "Cannot reverse scalable vectors"); 2528 SmallVector<int, 8> ShuffleMask; 2529 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 2530 ShuffleMask.push_back(VF.getKnownMinValue() - i - 1); 2531 2532 return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse"); 2533 } 2534 2535 // Return whether we allow using masked interleave-groups (for dealing with 2536 // strided loads/stores that reside in predicated blocks, or for dealing 2537 // with gaps). 2538 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2539 // If an override option has been passed in for interleaved accesses, use it. 2540 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2541 return EnableMaskedInterleavedMemAccesses; 2542 2543 return TTI.enableMaskedInterleavedAccessVectorization(); 2544 } 2545 2546 // Try to vectorize the interleave group that \p Instr belongs to. 2547 // 2548 // E.g. Translate following interleaved load group (factor = 3): 2549 // for (i = 0; i < N; i+=3) { 2550 // R = Pic[i]; // Member of index 0 2551 // G = Pic[i+1]; // Member of index 1 2552 // B = Pic[i+2]; // Member of index 2 2553 // ... // do something to R, G, B 2554 // } 2555 // To: 2556 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2557 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2558 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2559 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2560 // 2561 // Or translate following interleaved store group (factor = 3): 2562 // for (i = 0; i < N; i+=3) { 2563 // ... 
do something to R, G, B 2564 // Pic[i] = R; // Member of index 0 2565 // Pic[i+1] = G; // Member of index 1 2566 // Pic[i+2] = B; // Member of index 2 2567 // } 2568 // To: 2569 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2570 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2571 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2572 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2573 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2574 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2575 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2576 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2577 VPValue *BlockInMask) { 2578 Instruction *Instr = Group->getInsertPos(); 2579 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2580 2581 // Prepare for the vector type of the interleaved load/store. 2582 Type *ScalarTy = getMemInstValueType(Instr); 2583 unsigned InterleaveFactor = Group->getFactor(); 2584 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2585 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2586 2587 // Prepare for the new pointers. 2588 SmallVector<Value *, 2> AddrParts; 2589 unsigned Index = Group->getIndex(Instr); 2590 2591 // TODO: extend the masked interleaved-group support to reversed access. 2592 assert((!BlockInMask || !Group->isReverse()) && 2593 "Reversed masked interleave-group not supported."); 2594 2595 // If the group is reverse, adjust the index to refer to the last vector lane 2596 // instead of the first. We adjust the index from the first vector lane, 2597 // rather than directly getting the pointer for lane VF - 1, because the 2598 // pointer operand of the interleaved access is supposed to be uniform. For 2599 // uniform instructions, we're only required to generate a value for the 2600 // first vector lane in each unroll iteration. 2601 assert(!VF.isScalable() && 2602 "scalable vector reverse operation is not implemented"); 2603 if (Group->isReverse()) 2604 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2605 2606 for (unsigned Part = 0; Part < UF; Part++) { 2607 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2608 setDebugLocFromInst(Builder, AddrPart); 2609 2610 // Note that the current instruction could be at any index in the group; we 2611 // need to adjust the address to the member of index 0. 2612 // 2613 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2614 // b = A[i]; // Member of index 0 2615 // The current pointer points to A[i+1]; adjust it to A[i]. 2616 // 2617 // E.g. A[i+1] = a; // Member of index 1 2618 // A[i] = b; // Member of index 0 2619 // A[i+2] = c; // Member of index 2 (Current instruction) 2620 // The current pointer points to A[i+2]; adjust it to A[i]. 2621 2622 bool InBounds = false; 2623 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2624 InBounds = gep->isInBounds(); 2625 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2626 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2627 2628 // Cast to the vector pointer type.
2629 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2630 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2631 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2632 } 2633 2634 setDebugLocFromInst(Builder, Instr); 2635 Value *PoisonVec = PoisonValue::get(VecTy); 2636 2637 Value *MaskForGaps = nullptr; 2638 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2639 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2640 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2641 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2642 } 2643 2644 // Vectorize the interleaved load group. 2645 if (isa<LoadInst>(Instr)) { 2646 // For each unroll part, create a wide load for the group. 2647 SmallVector<Value *, 2> NewLoads; 2648 for (unsigned Part = 0; Part < UF; Part++) { 2649 Instruction *NewLoad; 2650 if (BlockInMask || MaskForGaps) { 2651 assert(useMaskedInterleavedAccesses(*TTI) && 2652 "masked interleaved groups are not allowed."); 2653 Value *GroupMask = MaskForGaps; 2654 if (BlockInMask) { 2655 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2656 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2657 Value *ShuffledMask = Builder.CreateShuffleVector( 2658 BlockInMaskPart, 2659 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2660 "interleaved.mask"); 2661 GroupMask = MaskForGaps 2662 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2663 MaskForGaps) 2664 : ShuffledMask; 2665 } 2666 NewLoad = 2667 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2668 GroupMask, PoisonVec, "wide.masked.vec"); 2669 } 2670 else 2671 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2672 Group->getAlign(), "wide.vec"); 2673 Group->addMetadata(NewLoad); 2674 NewLoads.push_back(NewLoad); 2675 } 2676 2677 // For each member in the group, shuffle out the appropriate data from the 2678 // wide loads. 2679 unsigned J = 0; 2680 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2681 Instruction *Member = Group->getMember(I); 2682 2683 // Skip the gaps in the group. 2684 if (!Member) 2685 continue; 2686 2687 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2688 auto StrideMask = 2689 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2690 for (unsigned Part = 0; Part < UF; Part++) { 2691 Value *StridedVec = Builder.CreateShuffleVector( 2692 NewLoads[Part], StrideMask, "strided.vec"); 2693 2694 // If this member has different type, cast the result type. 2695 if (Member->getType() != ScalarTy) { 2696 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2697 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2698 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2699 } 2700 2701 if (Group->isReverse()) 2702 StridedVec = reverseVector(StridedVec); 2703 2704 State.set(VPDefs[J], Member, StridedVec, Part); 2705 } 2706 ++J; 2707 } 2708 return; 2709 } 2710 2711 // The sub vector type for current instruction. 2712 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2713 auto *SubVT = VectorType::get(ScalarTy, VF); 2714 2715 // Vectorize the interleaved store group. 2716 for (unsigned Part = 0; Part < UF; Part++) { 2717 // Collect the stored vector from each member. 
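// E.g. with factor 3 these are the vectors of R, G and B values from the
// store example at the top of this function, one per member index; gaps are
// not possible here since interleaved store groups disallow them.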
2718 SmallVector<Value *, 4> StoredVecs; 2719 for (unsigned i = 0; i < InterleaveFactor; i++) { 2720 // Interleaved store group doesn't allow a gap, so each index has a member 2721 assert(Group->getMember(i) && "Fail to get a member from an interleaved store group"); 2722 2723 Value *StoredVec = State.get(StoredValues[i], Part); 2724 2725 if (Group->isReverse()) 2726 StoredVec = reverseVector(StoredVec); 2727 2728 // If this member has different type, cast it to a unified type. 2729 2730 if (StoredVec->getType() != SubVT) 2731 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2732 2733 StoredVecs.push_back(StoredVec); 2734 } 2735 2736 // Concatenate all vectors into a wide vector. 2737 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2738 2739 // Interleave the elements in the wide vector. 2740 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2741 Value *IVec = Builder.CreateShuffleVector( 2742 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2743 "interleaved.vec"); 2744 2745 Instruction *NewStoreInstr; 2746 if (BlockInMask) { 2747 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2748 Value *ShuffledMask = Builder.CreateShuffleVector( 2749 BlockInMaskPart, 2750 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2751 "interleaved.mask"); 2752 NewStoreInstr = Builder.CreateMaskedStore( 2753 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2754 } 2755 else 2756 NewStoreInstr = 2757 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2758 2759 Group->addMetadata(NewStoreInstr); 2760 } 2761 } 2762 2763 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2764 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2765 VPValue *StoredValue, VPValue *BlockInMask) { 2766 // Attempt to issue a wide load. 2767 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2768 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2769 2770 assert((LI || SI) && "Invalid Load/Store instruction"); 2771 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2772 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2773 2774 LoopVectorizationCostModel::InstWidening Decision = 2775 Cost->getWideningDecision(Instr, VF); 2776 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2777 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2778 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2779 "CM decision is not to widen the memory instruction"); 2780 2781 Type *ScalarDataTy = getMemInstValueType(Instr); 2782 2783 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2784 const Align Alignment = getLoadStoreAlignment(Instr); 2785 2786 // Determine if the pointer operand of the access is either consecutive or 2787 // reverse consecutive. 2788 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2789 bool ConsecutiveStride = 2790 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2791 bool CreateGatherScatter = 2792 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2793 2794 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2795 // gather/scatter. Otherwise Decision should have been to Scalarize. 
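// Illustrative examples of the decisions handled here (hypothetical loops):
// a load of a[i] is CM_Widen (consecutive), a[N-i] is CM_Widen_Reverse, and
// a[b[i]] is CM_GatherScatter; any other decision would have scalarized the
// instruction instead of reaching this function.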
2796 assert((ConsecutiveStride || CreateGatherScatter) && 2797 "The instruction should be scalarized"); 2798 (void)ConsecutiveStride; 2799 2800 VectorParts BlockInMaskParts(UF); 2801 bool isMaskRequired = BlockInMask; 2802 if (isMaskRequired) 2803 for (unsigned Part = 0; Part < UF; ++Part) 2804 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2805 2806 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2807 // Calculate the pointer for the specific unroll-part. 2808 GetElementPtrInst *PartPtr = nullptr; 2809 2810 bool InBounds = false; 2811 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2812 InBounds = gep->isInBounds(); 2813 2814 if (Reverse) { 2815 assert(!VF.isScalable() && 2816 "Reversing vectors is not yet supported for scalable vectors."); 2817 2818 // If the address is consecutive but reversed, then the 2819 // wide store needs to start at the last vector element. 2820 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2821 ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue()))); 2822 PartPtr->setIsInBounds(InBounds); 2823 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2824 ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue()))); 2825 PartPtr->setIsInBounds(InBounds); 2826 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2827 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2828 } else { 2829 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2830 PartPtr = cast<GetElementPtrInst>( 2831 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2832 PartPtr->setIsInBounds(InBounds); 2833 } 2834 2835 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2836 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2837 }; 2838 2839 // Handle Stores: 2840 if (SI) { 2841 setDebugLocFromInst(Builder, SI); 2842 2843 for (unsigned Part = 0; Part < UF; ++Part) { 2844 Instruction *NewSI = nullptr; 2845 Value *StoredVal = State.get(StoredValue, Part); 2846 if (CreateGatherScatter) { 2847 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2848 Value *VectorGep = State.get(Addr, Part); 2849 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2850 MaskPart); 2851 } else { 2852 if (Reverse) { 2853 // If we store to reverse consecutive memory locations, then we need 2854 // to reverse the order of elements in the stored value. 2855 StoredVal = reverseVector(StoredVal); 2856 // We don't want to update the value in the map as it might be used in 2857 // another expression. So don't call resetVectorValue(StoredVal). 2858 } 2859 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2860 if (isMaskRequired) 2861 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2862 BlockInMaskParts[Part]); 2863 else 2864 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2865 } 2866 addMetadata(NewSI, SI); 2867 } 2868 return; 2869 } 2870 2871 // Handle loads. 2872 assert(LI && "Must have a load instruction"); 2873 setDebugLocFromInst(Builder, LI); 2874 for (unsigned Part = 0; Part < UF; ++Part) { 2875 Value *NewLI; 2876 if (CreateGatherScatter) { 2877 Value *MaskPart = isMaskRequired ? 
BlockInMaskParts[Part] : nullptr;
2878 Value *VectorGep = State.get(Addr, Part);
2879 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2880 nullptr, "wide.masked.gather");
2881 addMetadata(NewLI, LI);
2882 } else {
2883 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2884 if (isMaskRequired)
2885 NewLI = Builder.CreateMaskedLoad(
2886 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2887 "wide.masked.load");
2888 else
2889 NewLI =
2890 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2891
2892 // Add metadata to the load, but setVectorValue to the reverse shuffle.
2893 addMetadata(NewLI, LI);
2894 if (Reverse)
2895 NewLI = reverseVector(NewLI);
2896 }
2897
2898 State.set(Def, Instr, NewLI, Part);
2899 }
2900 }
2901
2902 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
2903 const VPIteration &Instance,
2904 bool IfPredicateInstr,
2905 VPTransformState &State) {
2906 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2907
2908 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2909 // the first lane and part.
2910 if (isa<NoAliasScopeDeclInst>(Instr))
2911 if (!Instance.isFirstIteration())
2912 return;
2913
2914 setDebugLocFromInst(Builder, Instr);
2915
2916 // Does this instruction return a value?
2917 bool IsVoidRetTy = Instr->getType()->isVoidTy();
2918
2919 Instruction *Cloned = Instr->clone();
2920 if (!IsVoidRetTy)
2921 Cloned->setName(Instr->getName() + ".cloned");
2922
2923 // Replace the operands of the cloned instruction with their scalar
2924 // equivalents in the new loop.
2925 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2926 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2927 auto InputInstance = Instance;
2928 if (!Operand || !OrigLoop->contains(Operand) ||
2929 (Cost->isUniformAfterVectorization(Operand, State.VF)))
2930 InputInstance.Lane = 0;
2931 auto *NewOp = State.get(User.getOperand(op), InputInstance);
2932 Cloned->setOperand(op, NewOp);
2933 }
2934 addNewMetadata(Cloned, Instr);
2935
2936 // Place the cloned scalar in the new loop.
2937 Builder.Insert(Cloned);
2938
2939 // TODO: Set result for VPValue of VPReplicateRecipe. This requires
2940 // representing scalar values in VPTransformState. Add the cloned scalar to
2941 // the scalar map entry.
2942 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2943
2944 // If we just cloned a new assumption, add it to the assumption cache.
2945 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2946 if (II->getIntrinsicID() == Intrinsic::assume)
2947 AC->registerAssumption(II);
2948
2949 // End if-block.
2950 if (IfPredicateInstr)
2951 PredicatedInstructions.push_back(Cloned);
2952 }
2953
2954 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2955 Value *End, Value *Step,
2956 Instruction *DL) {
2957 BasicBlock *Header = L->getHeader();
2958 BasicBlock *Latch = L->getLoopLatch();
2959 // As we're just creating this loop, it's possible no latch exists
2960 // yet. If so, use the header as this will be a single block loop.
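// Either way, the skeleton emitted below looks roughly like this
// (illustrative IR):
// %index = phi [ %start, %preheader ], [ %index.next, %latch ]
// ...
// %index.next = add %index, %step
// %cmp = icmp eq %index.next, %end
// br i1 %cmp, label %exit, label %header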
2961 if (!Latch) 2962 Latch = Header; 2963 2964 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2965 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2966 setDebugLocFromInst(Builder, OldInst); 2967 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2968 2969 Builder.SetInsertPoint(Latch->getTerminator()); 2970 setDebugLocFromInst(Builder, OldInst); 2971 2972 // Create i+1 and fill the PHINode. 2973 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2974 Induction->addIncoming(Start, L->getLoopPreheader()); 2975 Induction->addIncoming(Next, Latch); 2976 // Create the compare. 2977 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2978 Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 2979 2980 // Now we have two terminators. Remove the old one from the block. 2981 Latch->getTerminator()->eraseFromParent(); 2982 2983 return Induction; 2984 } 2985 2986 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2987 if (TripCount) 2988 return TripCount; 2989 2990 assert(L && "Create Trip Count for null loop."); 2991 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2992 // Find the loop boundaries. 2993 ScalarEvolution *SE = PSE.getSE(); 2994 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2995 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2996 "Invalid loop count"); 2997 2998 Type *IdxTy = Legal->getWidestInductionType(); 2999 assert(IdxTy && "No type for induction"); 3000 3001 // The exit count might have the type of i64 while the phi is i32. This can 3002 // happen if we have an induction variable that is sign extended before the 3003 // compare. The only way that we get a backedge taken count is that the 3004 // induction variable was signed and as such will not overflow. In such a case 3005 // truncation is legal. 3006 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3007 IdxTy->getPrimitiveSizeInBits()) 3008 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3009 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3010 3011 // Get the total trip count from the count by adding 1. 3012 const SCEV *ExitCount = SE->getAddExpr( 3013 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3014 3015 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3016 3017 // Expand the trip count and place the new instructions in the preheader. 3018 // Notice that the pre-header does not change, only the loop body. 3019 SCEVExpander Exp(*SE, DL, "induction"); 3020 3021 // Count holds the overall loop count (N). 3022 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3023 L->getLoopPreheader()->getTerminator()); 3024 3025 if (TripCount->getType()->isPointerTy()) 3026 TripCount = 3027 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3028 L->getLoopPreheader()->getTerminator()); 3029 3030 return TripCount; 3031 } 3032 3033 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3034 if (VectorTripCount) 3035 return VectorTripCount; 3036 3037 Value *TC = getOrCreateTripCount(L); 3038 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3039 3040 Type *Ty = TC->getType(); 3041 // This is where we can make the step a runtime constant. 3042 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3043 3044 // If the tail is to be folded by masking, round the number of iterations N 3045 // up to a multiple of Step instead of rounding down. 
This is done by first 3046 // adding Step-1 and then rounding down. Note that it's ok if this addition 3047 // overflows: the vector induction variable will eventually wrap to zero given 3048 // that it starts at zero and its Step is a power of two; the loop will then 3049 // exit, with the last early-exit vector comparison also producing all-true. 3050 if (Cost->foldTailByMasking()) { 3051 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3052 "VF*UF must be a power of 2 when folding tail by masking"); 3053 assert(!VF.isScalable() && 3054 "Tail folding not yet supported for scalable vectors"); 3055 TC = Builder.CreateAdd( 3056 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3057 } 3058 3059 // Now we need to generate the expression for the part of the loop that the 3060 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3061 // iterations are not required for correctness, or N - Step, otherwise. Step 3062 // is equal to the vectorization factor (number of SIMD elements) times the 3063 // unroll factor (number of SIMD instructions). 3064 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3065 3066 // There are two cases where we need to ensure (at least) the last iteration 3067 // runs in the scalar remainder loop. Thus, if the step evenly divides 3068 // the trip count, we set the remainder to be equal to the step. If the step 3069 // does not evenly divide the trip count, no adjustment is necessary since 3070 // there will already be scalar iterations. Note that the minimum iterations 3071 // check ensures that N >= Step. The cases are: 3072 // 1) If there is a non-reversed interleaved group that may speculatively 3073 // access memory out-of-bounds. 3074 // 2) If any instruction may follow a conditionally taken exit. That is, if 3075 // the loop contains multiple exiting blocks, or a single exiting block 3076 // which is not the latch. 3077 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 3078 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3079 R = Builder.CreateSelect(IsZero, Step, R); 3080 } 3081 3082 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3083 3084 return VectorTripCount; 3085 } 3086 3087 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3088 const DataLayout &DL) { 3089 // Verify that V is a vector type with same number of elements as DstVTy. 3090 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3091 unsigned VF = DstFVTy->getNumElements(); 3092 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3093 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3094 Type *SrcElemTy = SrcVecTy->getElementType(); 3095 Type *DstElemTy = DstFVTy->getElementType(); 3096 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3097 "Vector elements must have same size"); 3098 3099 // Do a direct cast if element types are castable. 3100 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3101 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3102 } 3103 // V cannot be directly casted to desired vector type. 3104 // May happen when V is a floating point vector but DstVTy is a vector of 3105 // pointers or vice-versa. Handle this using a two-step bitcast using an 3106 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
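// For example (illustrative, assuming 64-bit pointers in the data layout):
// casting <4 x double> to <4 x i8*> is emitted as a bitcast to <4 x i64>
// followed by an inttoptr to <4 x i8*>.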
3107 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3108 "Only one type should be a pointer type");
3109 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3110 "Only one type should be a floating point type");
3111 Type *IntTy =
3112 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3113 auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3114 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3115 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3116 }
3117
3118 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3119 BasicBlock *Bypass) {
3120 Value *Count = getOrCreateTripCount(L);
3121 // Reuse existing vector loop preheader for TC checks.
3122 // Note that a new preheader block is generated for the vector loop.
3123 BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3124 IRBuilder<> Builder(TCCheckBlock->getTerminator());
3125
3126 // Generate code to check if the loop's trip count is less than VF * UF, or
3127 // equal to it in case a scalar epilogue is required; this implies that the
3128 // vector trip count is zero. This check also covers the case where adding one
3129 // to the backedge-taken count overflowed, leading to an incorrect trip count
3130 // of zero. In this case we will also jump to the scalar loop.
3131 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3132 : ICmpInst::ICMP_ULT;
3133
3134 // If the tail is to be folded, the vector loop takes care of all iterations.
3135 Value *CheckMinIters = Builder.getFalse();
3136 if (!Cost->foldTailByMasking()) {
3137 Value *Step =
3138 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3139 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3140 }
3141 // Create new preheader for vector loop.
3142 LoopVectorPreHeader =
3143 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3144 "vector.ph");
3145
3146 assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3147 DT->getNode(Bypass)->getIDom()) &&
3148 "TC check is expected to dominate Bypass");
3149
3150 // Update dominator for Bypass & LoopExit.
3151 DT->changeImmediateDominator(Bypass, TCCheckBlock);
3152 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3153
3154 ReplaceInstWithInst(
3155 TCCheckBlock->getTerminator(),
3156 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3157 LoopBypassBlocks.push_back(TCCheckBlock);
3158 }
3159
3160 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3161 // Reuse existing vector loop preheader for SCEV checks.
3162 // Note that a new preheader block is generated for the vector loop.
3163 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
3164
3165 // Generate the code to check the SCEV assumptions that we made.
3166 // We want the new basic block to start at the first instruction in a
3167 // sequence of instructions that form a check.
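// The predicates expanded here are the stride and overflow assumptions
// referred to in the assert below, e.g. (illustrative) a check that a
// runtime stride is in fact 1, or that a narrow induction variable does
// not wrap.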
3168 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3169 "scev.check"); 3170 Value *SCEVCheck = Exp.expandCodeForPredicate( 3171 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 3172 3173 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3174 if (C->isZero()) 3175 return; 3176 3177 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3178 (OptForSizeBasedOnProfile && 3179 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3180 "Cannot SCEV check stride or overflow when optimizing for size"); 3181 3182 SCEVCheckBlock->setName("vector.scevcheck"); 3183 // Create new preheader for vector loop. 3184 LoopVectorPreHeader = 3185 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 3186 nullptr, "vector.ph"); 3187 3188 // Update dominator only if this is first RT check. 3189 if (LoopBypassBlocks.empty()) { 3190 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3191 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3192 } 3193 3194 ReplaceInstWithInst( 3195 SCEVCheckBlock->getTerminator(), 3196 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 3197 LoopBypassBlocks.push_back(SCEVCheckBlock); 3198 AddedSafetyChecks = true; 3199 } 3200 3201 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3202 // VPlan-native path does not do any analysis for runtime checks currently. 3203 if (EnableVPlanNativePath) 3204 return; 3205 3206 // Reuse existing vector loop preheader for runtime memory checks. 3207 // Note that new preheader block is generated for vector loop. 3208 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 3209 3210 // Generate the code that checks in runtime if arrays overlap. We put the 3211 // checks into a separate block to make the more common case of few elements 3212 // faster. 3213 auto *LAI = Legal->getLAI(); 3214 const auto &RtPtrChecking = *LAI->getRuntimePointerChecking(); 3215 if (!RtPtrChecking.Need) 3216 return; 3217 3218 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3219 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3220 "Cannot emit memory checks when optimizing for size, unless forced " 3221 "to vectorize."); 3222 ORE->emit([&]() { 3223 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3224 L->getStartLoc(), L->getHeader()) 3225 << "Code-size may be reduced by not forcing " 3226 "vectorization, or by source-code modifications " 3227 "eliminating the need for runtime checks " 3228 "(e.g., adding 'restrict')."; 3229 }); 3230 } 3231 3232 MemCheckBlock->setName("vector.memcheck"); 3233 // Create new preheader for vector loop. 3234 LoopVectorPreHeader = 3235 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 3236 "vector.ph"); 3237 3238 auto *CondBranch = cast<BranchInst>( 3239 Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader)); 3240 ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch); 3241 LoopBypassBlocks.push_back(MemCheckBlock); 3242 AddedSafetyChecks = true; 3243 3244 // Update dominator only if this is first RT check. 
3245 if (LoopBypassBlocks.empty()) { 3246 DT->changeImmediateDominator(Bypass, MemCheckBlock); 3247 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 3248 } 3249 3250 Instruction *FirstCheckInst; 3251 Instruction *MemRuntimeCheck; 3252 SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(), 3253 "induction"); 3254 std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks( 3255 MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp); 3256 assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking " 3257 "claimed checks are required"); 3258 CondBranch->setCondition(MemRuntimeCheck); 3259 3260 // We currently don't use LoopVersioning for the actual loop cloning but we 3261 // still use it to add the noalias metadata. 3262 LVer = std::make_unique<LoopVersioning>( 3263 *Legal->getLAI(), 3264 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3265 DT, PSE.getSE()); 3266 LVer->prepareNoAliasMetadata(); 3267 } 3268 3269 Value *InnerLoopVectorizer::emitTransformedIndex( 3270 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3271 const InductionDescriptor &ID) const { 3272 3273 SCEVExpander Exp(*SE, DL, "induction"); 3274 auto Step = ID.getStep(); 3275 auto StartValue = ID.getStartValue(); 3276 assert(Index->getType() == Step->getType() && 3277 "Index type does not match StepValue type"); 3278 3279 // Note: the IR at this point is broken. We cannot use SE to create any new 3280 // SCEV and then expand it, hoping that SCEV's simplification will give us 3281 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3282 // lead to various SCEV crashes. So all we can do is to use builder and rely 3283 // on InstCombine for future simplifications. Here we handle some trivial 3284 // cases only. 3285 auto CreateAdd = [&B](Value *X, Value *Y) { 3286 assert(X->getType() == Y->getType() && "Types don't match!"); 3287 if (auto *CX = dyn_cast<ConstantInt>(X)) 3288 if (CX->isZero()) 3289 return Y; 3290 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3291 if (CY->isZero()) 3292 return X; 3293 return B.CreateAdd(X, Y); 3294 }; 3295 3296 auto CreateMul = [&B](Value *X, Value *Y) { 3297 assert(X->getType() == Y->getType() && "Types don't match!"); 3298 if (auto *CX = dyn_cast<ConstantInt>(X)) 3299 if (CX->isOne()) 3300 return Y; 3301 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3302 if (CY->isOne()) 3303 return X; 3304 return B.CreateMul(X, Y); 3305 }; 3306 3307 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3308 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3309 // the DomTree is not kept up-to-date for additional blocks generated in the 3310 // vector loop. By using the header as insertion point, we guarantee that the 3311 // expanded instructions dominate all their uses. 
3312 auto GetInsertPoint = [this, &B]() { 3313 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3314 if (InsertBB != LoopVectorBody && 3315 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3316 return LoopVectorBody->getTerminator(); 3317 return &*B.GetInsertPoint(); 3318 }; 3319 switch (ID.getKind()) { 3320 case InductionDescriptor::IK_IntInduction: { 3321 assert(Index->getType() == StartValue->getType() && 3322 "Index type does not match StartValue type"); 3323 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3324 return B.CreateSub(StartValue, Index); 3325 auto *Offset = CreateMul( 3326 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3327 return CreateAdd(StartValue, Offset); 3328 } 3329 case InductionDescriptor::IK_PtrInduction: { 3330 assert(isa<SCEVConstant>(Step) && 3331 "Expected constant step for pointer induction"); 3332 return B.CreateGEP( 3333 StartValue->getType()->getPointerElementType(), StartValue, 3334 CreateMul(Index, 3335 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()))); 3336 } 3337 case InductionDescriptor::IK_FpInduction: { 3338 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3339 auto InductionBinOp = ID.getInductionBinOp(); 3340 assert(InductionBinOp && 3341 (InductionBinOp->getOpcode() == Instruction::FAdd || 3342 InductionBinOp->getOpcode() == Instruction::FSub) && 3343 "Original bin op should be defined for FP induction"); 3344 3345 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3346 3347 // Floating point operations had to be 'fast' to enable the induction. 3348 FastMathFlags Flags; 3349 Flags.setFast(); 3350 3351 Value *MulExp = B.CreateFMul(StepValue, Index); 3352 if (isa<Instruction>(MulExp)) 3353 // We have to check, the MulExp may be a constant. 3354 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 3355 3356 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3357 "induction"); 3358 if (isa<Instruction>(BOp)) 3359 cast<Instruction>(BOp)->setFastMathFlags(Flags); 3360 3361 return BOp; 3362 } 3363 case InductionDescriptor::IK_NoInduction: 3364 return nullptr; 3365 } 3366 llvm_unreachable("invalid enum"); 3367 } 3368 3369 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3370 LoopScalarBody = OrigLoop->getHeader(); 3371 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3372 LoopExitBlock = OrigLoop->getUniqueExitBlock(); 3373 assert(LoopExitBlock && "Must have an exit block"); 3374 assert(LoopVectorPreHeader && "Invalid loop structure"); 3375 3376 LoopMiddleBlock = 3377 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3378 LI, nullptr, Twine(Prefix) + "middle.block"); 3379 LoopScalarPreHeader = 3380 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3381 nullptr, Twine(Prefix) + "scalar.ph"); 3382 3383 // Set up branch from middle block to the exit and scalar preheader blocks. 3384 // completeLoopSkeleton will update the condition to use an iteration check, 3385 // if required to decide whether to execute the remainder. 
3386 BranchInst *BrInst =
3387 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3388 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3389 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3390 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3391
3392 // We intentionally don't let SplitBlock update LoopInfo since
3393 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
3394 // LoopVectorBody is explicitly added to the correct place a few lines later.
3395 LoopVectorBody =
3396 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3397 nullptr, nullptr, Twine(Prefix) + "vector.body");
3398
3399 // Update dominator for loop exit.
3400 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3401
3402 // Create and register the new vector loop.
3403 Loop *Lp = LI->AllocateLoop();
3404 Loop *ParentLoop = OrigLoop->getParentLoop();
3405
3406 // Insert the new loop into the loop nest and register the new basic blocks
3407 // before calling any utilities such as SCEV that require valid LoopInfo.
3408 if (ParentLoop) {
3409 ParentLoop->addChildLoop(Lp);
3410 } else {
3411 LI->addTopLevelLoop(Lp);
3412 }
3413 Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3414 return Lp;
3415 }
3416
3417 void InnerLoopVectorizer::createInductionResumeValues(
3418 Loop *L, Value *VectorTripCount,
3419 std::pair<BasicBlock *, Value *> AdditionalBypass) {
3420 assert(VectorTripCount && L && "Expected valid arguments");
3421 assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3422 (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3423 "Inconsistent information about additional bypass.");
3424 // We are going to resume the execution of the scalar loop.
3425 // Go over all of the induction variables that we found and fix the
3426 // PHIs that are left in the scalar version of the loop.
3427 // The starting values of PHI nodes depend on the counter of the last
3428 // iteration in the vectorized loop.
3429 // If we come from a bypass edge then we need to start from the original
3430 // start value.
3431 for (auto &InductionEntry : Legal->getInductionVars()) {
3432 PHINode *OrigPhi = InductionEntry.first;
3433 InductionDescriptor II = InductionEntry.second;
3434
3435 // Create phi nodes to merge from the backedge-taken check block.
3436 PHINode *BCResumeVal =
3437 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3438 LoopScalarPreHeader->getTerminator());
3439 // Copy original phi DL over to the new one.
3440 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3441 Value *&EndValue = IVEndValues[OrigPhi];
3442 Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3443 if (OrigPhi == OldInduction) {
3444 // We know what the end value is.
3445 EndValue = VectorTripCount;
3446 } else {
3447 IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3448 Type *StepType = II.getStep()->getType();
3449 Instruction::CastOps CastOp =
3450 CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3451 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3452 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3453 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3454 EndValue->setName("ind.end");
3455
3456 // Compute the end value for the additional bypass (if applicable).
3457 if (AdditionalBypass.first) { 3458 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3459 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3460 StepType, true); 3461 CRD = 3462 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3463 EndValueFromAdditionalBypass = 3464 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3465 EndValueFromAdditionalBypass->setName("ind.end"); 3466 } 3467 } 3468 // The new PHI merges the original incoming value, in case of a bypass, 3469 // or the value at the end of the vectorized loop. 3470 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3471 3472 // Fix the scalar body counter (PHI node). 3473 // The old induction's phi node in the scalar body needs the truncated 3474 // value. 3475 for (BasicBlock *BB : LoopBypassBlocks) 3476 BCResumeVal->addIncoming(II.getStartValue(), BB); 3477 3478 if (AdditionalBypass.first) 3479 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3480 EndValueFromAdditionalBypass); 3481 3482 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3483 } 3484 } 3485 3486 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3487 MDNode *OrigLoopID) { 3488 assert(L && "Expected valid loop."); 3489 3490 // The trip counts should be cached by now. 3491 Value *Count = getOrCreateTripCount(L); 3492 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3493 3494 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3495 3496 // Add a check in the middle block to see if we have completed 3497 // all of the iterations in the first vector loop. 3498 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3499 // If tail is to be folded, we know we don't need to run the remainder. 3500 if (!Cost->foldTailByMasking()) { 3501 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3502 Count, VectorTripCount, "cmp.n", 3503 LoopMiddleBlock->getTerminator()); 3504 3505 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3506 // of the corresponding compare because they may have ended up with 3507 // different line numbers and we want to avoid awkward line stepping while 3508 // debugging. Eg. if the compare has got a line number inside the loop. 3509 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3510 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3511 } 3512 3513 // Get ready to start creating new instructions into the vectorized body. 3514 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3515 "Inconsistent vector loop preheader"); 3516 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3517 3518 Optional<MDNode *> VectorizedLoopID = 3519 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3520 LLVMLoopVectorizeFollowupVectorized}); 3521 if (VectorizedLoopID.hasValue()) { 3522 L->setLoopID(VectorizedLoopID.getValue()); 3523 3524 // Do not setAlreadyVectorized if loop attributes have been defined 3525 // explicitly. 3526 return LoopVectorPreHeader; 3527 } 3528 3529 // Keep all loop hints from the original loop on the vector loop (we'll 3530 // replace the vectorizer-specific hints below). 
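// E.g. (illustrative) hints such as llvm.loop.unroll.* are carried over
// unchanged, while Hints.setAlreadyVectorized() below tags the loop with
// llvm.loop.isvectorized so that it is not vectorized a second time.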
3531 if (MDNode *LID = OrigLoop->getLoopID()) 3532 L->setLoopID(LID); 3533 3534 LoopVectorizeHints Hints(L, true, *ORE); 3535 Hints.setAlreadyVectorized(); 3536 3537 #ifdef EXPENSIVE_CHECKS 3538 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3539 LI->verify(*DT); 3540 #endif 3541 3542 return LoopVectorPreHeader; 3543 } 3544 3545 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3546 /* 3547 In this function we generate a new loop. The new loop will contain 3548 the vectorized instructions while the old loop will continue to run the 3549 scalar remainder. 3550 3551 [ ] <-- loop iteration number check. 3552 / | 3553 / v 3554 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3555 | / | 3556 | / v 3557 || [ ] <-- vector pre header. 3558 |/ | 3559 | v 3560 | [ ] \ 3561 | [ ]_| <-- vector loop. 3562 | | 3563 | v 3564 | -[ ] <--- middle-block. 3565 | / | 3566 | / v 3567 -|- >[ ] <--- new preheader. 3568 | | 3569 | v 3570 | [ ] \ 3571 | [ ]_| <-- old scalar loop to handle remainder. 3572 \ | 3573 \ v 3574 >[ ] <-- exit block. 3575 ... 3576 */ 3577 3578 // Get the metadata of the original loop before it gets modified. 3579 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3580 3581 // Create an empty vector loop, and prepare basic blocks for the runtime 3582 // checks. 3583 Loop *Lp = createVectorLoopSkeleton(""); 3584 3585 // Now, compare the new count to zero. If it is zero skip the vector loop and 3586 // jump to the scalar loop. This check also covers the case where the 3587 // backedge-taken count is uint##_max: adding one to it will overflow leading 3588 // to an incorrect trip count of zero. In this (rare) case we will also jump 3589 // to the scalar loop. 3590 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3591 3592 // Generate the code to check any assumptions that we've made for SCEV 3593 // expressions. 3594 emitSCEVChecks(Lp, LoopScalarPreHeader); 3595 3596 // Generate the code that checks in runtime if arrays overlap. We put the 3597 // checks into a separate block to make the more common case of few elements 3598 // faster. 3599 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3600 3601 // Some loops have a single integer induction variable, while other loops 3602 // don't. One example is c++ iterators that often have multiple pointer 3603 // induction variables. In the code below we also support a case where we 3604 // don't have a single induction variable. 3605 // 3606 // We try to obtain an induction variable from the original loop as hard 3607 // as possible. However if we don't find one that: 3608 // - is an integer 3609 // - counts from zero, stepping by one 3610 // - is the size of the widest induction variable type 3611 // then we create a new one. 3612 OldInduction = Legal->getPrimaryInduction(); 3613 Type *IdxTy = Legal->getWidestInductionType(); 3614 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3615 // The loop step is equal to the vectorization factor (num of SIMD elements) 3616 // times the unroll factor (num of SIMD instructions). 3617 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3618 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3619 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3620 Induction = 3621 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3622 getDebugLocFromInstOrOperands(OldInduction)); 3623 3624 // Emit phis for the new starting index of the scalar loop. 
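// E.g. (illustrative IR) each induction gets a resume phi of the form
// %bc.resume.val = phi [ %ind.end, %middle.block ], [ %start, %bypass ]
// so the scalar remainder continues exactly where the vector loop stopped.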
3625 createInductionResumeValues(Lp, CountRoundDown);
3626
3627 return completeLoopSkeleton(Lp, OrigLoopID);
3628 }
3629
3630 // Fix up external users of the induction variable. At this point, we are
3631 // in LCSSA form, with all external PHIs that use the IV having one input value,
3632 // coming from the remainder loop. We need those PHIs to also have a correct
3633 // value for the IV when arriving directly from the middle block.
3634 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3635 const InductionDescriptor &II,
3636 Value *CountRoundDown, Value *EndValue,
3637 BasicBlock *MiddleBlock) {
3638 // There are two kinds of external IV usages: those that use the value
3639 // computed in the last iteration (the PHI) and those that use the penultimate
3640 // value (the value that feeds into the phi from the loop latch).
3641 // We allow both, but they obviously have different values.
3642
3643 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3644
3645 DenseMap<Value *, Value *> MissingVals;
3646
3647 // An external user of the last iteration's value should see the value that
3648 // the remainder loop uses to initialize its own IV.
3649 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3650 for (User *U : PostInc->users()) {
3651 Instruction *UI = cast<Instruction>(U);
3652 if (!OrigLoop->contains(UI)) {
3653 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3654 MissingVals[UI] = EndValue;
3655 }
3656 }
3657
3658 // An external user of the penultimate value needs to see EndValue - Step.
3659 // The simplest way to get this is to recompute it from the constituent SCEVs,
3660 // that is Start + (Step * (CRD - 1)).
3661 for (User *U : OrigPhi->users()) {
3662 auto *UI = cast<Instruction>(U);
3663 if (!OrigLoop->contains(UI)) {
3664 const DataLayout &DL =
3665 OrigLoop->getHeader()->getModule()->getDataLayout();
3666 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3667
3668 IRBuilder<> B(MiddleBlock->getTerminator());
3669 Value *CountMinusOne = B.CreateSub(
3670 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3671 Value *CMO =
3672 !II.getStep()->getType()->isIntegerTy()
3673 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3674 II.getStep()->getType())
3675 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3676 CMO->setName("cast.cmo");
3677 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3678 Escape->setName("ind.escape");
3679 MissingVals[UI] = Escape;
3680 }
3681 }
3682
3683 for (auto &I : MissingVals) {
3684 PHINode *PHI = cast<PHINode>(I.first);
3685 // One corner case we have to handle is two IVs "chasing" each other,
3686 // that is %IV2 = phi [...], [ %IV1, %latch ]
3687 // In this case, if IV1 has an external use, we need to avoid adding both
3688 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3689 // don't already have an incoming value for the middle block.
3690 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3691 PHI->addIncoming(I.second, MiddleBlock); 3692 } 3693 } 3694 3695 namespace { 3696 3697 struct CSEDenseMapInfo { 3698 static bool canHandle(const Instruction *I) { 3699 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3700 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3701 } 3702 3703 static inline Instruction *getEmptyKey() { 3704 return DenseMapInfo<Instruction *>::getEmptyKey(); 3705 } 3706 3707 static inline Instruction *getTombstoneKey() { 3708 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3709 } 3710 3711 static unsigned getHashValue(const Instruction *I) { 3712 assert(canHandle(I) && "Unknown instruction!"); 3713 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3714 I->value_op_end())); 3715 } 3716 3717 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3718 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3719 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3720 return LHS == RHS; 3721 return LHS->isIdenticalTo(RHS); 3722 } 3723 }; 3724 3725 } // end anonymous namespace 3726 3727 ///Perform cse of induction variable instructions. 3728 static void cse(BasicBlock *BB) { 3729 // Perform simple cse. 3730 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3731 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3732 Instruction *In = &*I++; 3733 3734 if (!CSEDenseMapInfo::canHandle(In)) 3735 continue; 3736 3737 // Check if we can replace this instruction with any of the 3738 // visited instructions. 3739 if (Instruction *V = CSEMap.lookup(In)) { 3740 In->replaceAllUsesWith(V); 3741 In->eraseFromParent(); 3742 continue; 3743 } 3744 3745 CSEMap[In] = In; 3746 } 3747 } 3748 3749 InstructionCost 3750 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3751 bool &NeedToScalarize) { 3752 assert(!VF.isScalable() && "scalable vectors not yet supported."); 3753 Function *F = CI->getCalledFunction(); 3754 Type *ScalarRetTy = CI->getType(); 3755 SmallVector<Type *, 4> Tys, ScalarTys; 3756 for (auto &ArgOp : CI->arg_operands()) 3757 ScalarTys.push_back(ArgOp->getType()); 3758 3759 // Estimate cost of scalarized vector call. The source operands are assumed 3760 // to be vectors, so we need to extract individual elements from there, 3761 // execute VF scalar calls, and then gather the result into the vector return 3762 // value. 3763 InstructionCost ScalarCallCost = 3764 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3765 if (VF.isScalar()) 3766 return ScalarCallCost; 3767 3768 // Compute corresponding vector type for return value and arguments. 3769 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3770 for (Type *ScalarTy : ScalarTys) 3771 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3772 3773 // Compute costs of unpacking argument values for the scalar calls and 3774 // packing the return values to a vector. 3775 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3776 3777 InstructionCost Cost = 3778 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3779 3780 // If we can't emit a vector call for this function, then the currently found 3781 // cost is the cost we need to return. 
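// Worked example with made-up numbers: for VF = 4, a scalar call cost of 10
// and a scalarization overhead of 8, the scalarized cost computed above is
// 4 * 10 + 8 = 48; a matching vector library call costing less than 48 is
// preferred below and NeedToScalarize is cleared.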
3782 NeedToScalarize = true; 3783 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3784 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3785 3786 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3787 return Cost; 3788 3789 // If the corresponding vector cost is cheaper, return its cost. 3790 InstructionCost VectorCallCost = 3791 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3792 if (VectorCallCost < Cost) { 3793 NeedToScalarize = false; 3794 Cost = VectorCallCost; 3795 } 3796 return Cost; 3797 } 3798 3799 InstructionCost 3800 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3801 ElementCount VF) { 3802 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3803 assert(ID && "Expected intrinsic call!"); 3804 3805 IntrinsicCostAttributes CostAttrs(ID, *CI, VF); 3806 return TTI.getIntrinsicInstrCost(CostAttrs, 3807 TargetTransformInfo::TCK_RecipThroughput); 3808 } 3809 3810 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3811 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3812 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3813 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3814 } 3815 3816 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3817 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3818 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3819 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3820 } 3821 3822 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3823 // For every instruction `I` in MinBWs, truncate the operands, create a 3824 // truncated version of `I` and reextend its result. InstCombine runs 3825 // later and will remove any ext/trunc pairs. 3826 SmallPtrSet<Value *, 4> Erased; 3827 for (const auto &KV : Cost->getMinimalBitwidths()) { 3828 // If the value wasn't vectorized, we must maintain the original scalar 3829 // type. The absence of the value from VectorLoopValueMap indicates that it 3830 // wasn't vectorized. 3831 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3832 continue; 3833 for (unsigned Part = 0; Part < UF; ++Part) { 3834 Value *I = getOrCreateVectorValue(KV.first, Part); 3835 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3836 continue; 3837 Type *OriginalTy = I->getType(); 3838 Type *ScalarTruncatedTy = 3839 IntegerType::get(OriginalTy->getContext(), KV.second); 3840 auto *TruncatedTy = FixedVectorType::get( 3841 ScalarTruncatedTy, 3842 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3843 if (TruncatedTy == OriginalTy) 3844 continue; 3845 3846 IRBuilder<> B(cast<Instruction>(I)); 3847 auto ShrinkOperand = [&](Value *V) -> Value * { 3848 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3849 if (ZI->getSrcTy() == TruncatedTy) 3850 return ZI->getOperand(0); 3851 return B.CreateZExtOrTrunc(V, TruncatedTy); 3852 }; 3853 3854 // The actual instruction modification depends on the instruction type, 3855 // unfortunately. 3856 Value *NewI = nullptr; 3857 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3858 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3859 ShrinkOperand(BO->getOperand(1))); 3860 3861 // Any wrapping introduced by shrinking this operation shouldn't be 3862 // considered undefined behavior. So, we can't unconditionally copy 3863 // arithmetic wrapping flags to NewI. 
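// E.g. an 'add nsw i32' shrunk to i16 may legitimately wrap at 16 bits, so
// copying nsw would make the narrowed result spuriously poison (illustrative).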
3864 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3865 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3866 NewI = 3867 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3868 ShrinkOperand(CI->getOperand(1))); 3869 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3870 NewI = B.CreateSelect(SI->getCondition(), 3871 ShrinkOperand(SI->getTrueValue()), 3872 ShrinkOperand(SI->getFalseValue())); 3873 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3874 switch (CI->getOpcode()) { 3875 default: 3876 llvm_unreachable("Unhandled cast!"); 3877 case Instruction::Trunc: 3878 NewI = ShrinkOperand(CI->getOperand(0)); 3879 break; 3880 case Instruction::SExt: 3881 NewI = B.CreateSExtOrTrunc( 3882 CI->getOperand(0), 3883 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3884 break; 3885 case Instruction::ZExt: 3886 NewI = B.CreateZExtOrTrunc( 3887 CI->getOperand(0), 3888 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3889 break; 3890 } 3891 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3892 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3893 ->getNumElements(); 3894 auto *O0 = B.CreateZExtOrTrunc( 3895 SI->getOperand(0), 3896 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3897 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3898 ->getNumElements(); 3899 auto *O1 = B.CreateZExtOrTrunc( 3900 SI->getOperand(1), 3901 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3902 3903 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3904 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3905 // Don't do anything with the operands, just extend the result. 3906 continue; 3907 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3908 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3909 ->getNumElements(); 3910 auto *O0 = B.CreateZExtOrTrunc( 3911 IE->getOperand(0), 3912 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3913 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3914 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3915 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3916 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3917 ->getNumElements(); 3918 auto *O0 = B.CreateZExtOrTrunc( 3919 EE->getOperand(0), 3920 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3921 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3922 } else { 3923 // If we don't know what to do, be conservative and don't do anything. 3924 continue; 3925 } 3926 3927 // Lastly, extend the result. 3928 NewI->takeName(cast<Instruction>(I)); 3929 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3930 I->replaceAllUsesWith(Res); 3931 cast<Instruction>(I)->eraseFromParent(); 3932 Erased.insert(I); 3933 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3934 } 3935 } 3936 3937 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3938 for (const auto &KV : Cost->getMinimalBitwidths()) { 3939 // If the value wasn't vectorized, we must maintain the original scalar 3940 // type. The absence of the value from VectorLoopValueMap indicates that it 3941 // wasn't vectorized. 
3942 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3943 continue;
3944 for (unsigned Part = 0; Part < UF; ++Part) {
3945 Value *I = getOrCreateVectorValue(KV.first, Part);
3946 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3947 if (Inst && Inst->use_empty()) {
3948 Value *NewI = Inst->getOperand(0);
3949 Inst->eraseFromParent();
3950 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3951 }
3952 }
3953 }
3954 }
3955
3956 void InnerLoopVectorizer::fixVectorizedLoop() {
3957 // Insert truncates and extends for any truncated instructions as hints to
3958 // InstCombine.
3959 if (VF.isVector())
3960 truncateToMinimalBitwidths();
3961
3962 // Fix widened non-induction PHIs by setting up the PHI operands.
3963 if (OrigPHIsToFix.size()) {
3964 assert(EnableVPlanNativePath &&
3965 "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3966 fixNonInductionPHIs();
3967 }
3968
3969 // At this point every instruction in the original loop is widened to a
3970 // vector form. Now we need to fix the recurrences in the loop. These PHI
3971 // nodes are currently empty because we did not want to introduce cycles.
3972 // This is the second stage of vectorizing recurrences.
3973 fixCrossIterationPHIs();
3974
3975 // Forget the original basic block.
3976 PSE.getSE()->forgetLoop(OrigLoop);
3977
3978 // Fix-up external users of the induction variables.
3979 for (auto &Entry : Legal->getInductionVars())
3980 fixupIVUsers(Entry.first, Entry.second,
3981 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3982 IVEndValues[Entry.first], LoopMiddleBlock);
3983
3984 fixLCSSAPHIs();
3985 for (Instruction *PI : PredicatedInstructions)
3986 sinkScalarOperands(&*PI);
3987
3988 // Remove redundant induction instructions.
3989 cse(LoopVectorBody);
3990
3991 // Set/update profile weights for the vector and remainder loops as original
3992 // loop iterations are now distributed among them. Note that the original loop,
3993 // represented by LoopScalarBody, becomes the remainder loop after vectorization.
3994 //
3995 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3996 // end up with a slightly roughened result, but that should be OK since the
3997 // profile is not inherently precise anyway. Note also that a possible bypass of
3998 // vector code caused by legality checks is ignored, assigning all the weight
3999 // to the vector loop, optimistically.
4000 //
4001 // For scalable vectorization we can't know at compile time how many iterations
4002 // of the loop are handled in one vector iteration, so instead assume a pessimistic
4003 // vscale of '1'.
4004 setProfileInfoAfterUnrolling(
4005 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4006 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4007 }
4008
4009 void InnerLoopVectorizer::fixCrossIterationPHIs() {
4010 // In order to support recurrences we need to be able to vectorize Phi nodes.
4011 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4012 // stage #2: We now need to fix the recurrences by adding incoming edges to
4013 // the currently empty PHI nodes. At this point every instruction in the
4014 // original loop is widened to a vector form so we can use them to construct
4015 // the incoming edges.
4016 for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4017 // Handle first-order recurrences and reductions that need to be fixed.
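// E.g. (illustrative C loops): 'b[i] = a[i] - prev; prev = a[i];' carries a
// first-order recurrence in 'prev', whereas 'sum += a[i];' is a reduction.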
4018 if (Legal->isFirstOrderRecurrence(&Phi)) 4019 fixFirstOrderRecurrence(&Phi); 4020 else if (Legal->isReductionVariable(&Phi)) 4021 fixReduction(&Phi); 4022 } 4023 } 4024 4025 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 4026 // This is the second phase of vectorizing first-order recurrences. An 4027 // overview of the transformation is described below. Suppose we have the 4028 // following loop. 4029 // 4030 // for (int i = 0; i < n; ++i) 4031 // b[i] = a[i] - a[i - 1]; 4032 // 4033 // There is a first-order recurrence on "a". For this loop, the shorthand 4034 // scalar IR looks like: 4035 // 4036 // scalar.ph: 4037 // s_init = a[-1] 4038 // br scalar.body 4039 // 4040 // scalar.body: 4041 // i = phi [0, scalar.ph], [i+1, scalar.body] 4042 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4043 // s2 = a[i] 4044 // b[i] = s2 - s1 4045 // br cond, scalar.body, ... 4046 // 4047 // In this example, s1 is a recurrence because its value depends on the 4048 // previous iteration. In the first phase of vectorization, we created a 4049 // temporary value for s1. We now complete the vectorization and produce the 4050 // shorthand vector IR shown below (for VF = 4, UF = 1). 4051 // 4052 // vector.ph: 4053 // v_init = vector(..., ..., ..., a[-1]) 4054 // br vector.body 4055 // 4056 // vector.body 4057 // i = phi [0, vector.ph], [i+4, vector.body] 4058 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4059 // v2 = a[i, i+1, i+2, i+3]; 4060 // v3 = vector(v1(3), v2(0, 1, 2)) 4061 // b[i, i+1, i+2, i+3] = v2 - v3 4062 // br cond, vector.body, middle.block 4063 // 4064 // middle.block: 4065 // x = v2(3) 4066 // br scalar.ph 4067 // 4068 // scalar.ph: 4069 // s_init = phi [x, middle.block], [a[-1], otherwise] 4070 // br scalar.body 4071 // 4072 // After the vector loop completes execution, we extract the next value of 4073 // the recurrence (x) to use as the initial value in the scalar loop. 4074 4075 // Get the original loop preheader and single loop latch. 4076 auto *Preheader = OrigLoop->getLoopPreheader(); 4077 auto *Latch = OrigLoop->getLoopLatch(); 4078 4079 // Get the initial and previous values of the scalar recurrence. 4080 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4081 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4082 4083 // Create a vector from the initial value. 4084 auto *VectorInit = ScalarInit; 4085 if (VF.isVector()) { 4086 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4087 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 4088 VectorInit = Builder.CreateInsertElement( 4089 PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 4090 Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init"); 4091 } 4092 4093 // We constructed a temporary phi node in the first phase of vectorization. 4094 // This phi node will eventually be deleted. 4095 Builder.SetInsertPoint( 4096 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 4097 4098 // Create a phi node for the new recurrence. The current value will either be 4099 // the initial value inserted into a vector or a loop-varying vector value. 4100 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4101 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4102 4103 // Get the vectorized previous value of the last part UF - 1. It appears last 4104 // among all unrolled iterations, due to the order of their construction.
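// For example, with VF = 4 and UF = 2, the previous value is computed as two parts covering lanes 0-3 and 4-7 of the widened iteration; part UF - 1 holds lanes 4-7, the last ones produced, and is therefore the value that must flow into the next vector iteration.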
4105 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 4106 4107 // Find and set the insertion point after the previous value if it is an 4108 // instruction. 4109 BasicBlock::iterator InsertPt; 4110 // Note that the previous value may have been constant-folded so it is not 4111 // guaranteed to be an instruction in the vector loop. 4112 // FIXME: Loop invariant values do not form recurrences. We should deal with 4113 // them earlier. 4114 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 4115 InsertPt = LoopVectorBody->getFirstInsertionPt(); 4116 else { 4117 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 4118 if (isa<PHINode>(PreviousLastPart)) 4119 // If the previous value is a phi node, we should insert after all the phi 4120 // nodes in the block containing the PHI to avoid breaking basic block 4121 // verification. Note that the basic block may be different to 4122 // LoopVectorBody, in case we predicate the loop. 4123 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 4124 else 4125 InsertPt = ++PreviousInst->getIterator(); 4126 } 4127 Builder.SetInsertPoint(&*InsertPt); 4128 4129 // We will construct a vector for the recurrence by combining the values for 4130 // the current and previous iterations. This is the required shuffle mask. 4131 assert(!VF.isScalable()); 4132 SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue()); 4133 ShuffleMask[0] = VF.getKnownMinValue() - 1; 4134 for (unsigned I = 1; I < VF.getKnownMinValue(); ++I) 4135 ShuffleMask[I] = I + VF.getKnownMinValue() - 1; 4136 4137 // The vector from which to take the initial value for the current iteration 4138 // (actual or unrolled). Initially, this is the vector phi node. 4139 Value *Incoming = VecPhi; 4140 4141 // Shuffle the current and previous vector and update the vector parts. 4142 for (unsigned Part = 0; Part < UF; ++Part) { 4143 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 4144 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 4145 auto *Shuffle = 4146 VF.isVector() 4147 ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask) 4148 : Incoming; 4149 PhiPart->replaceAllUsesWith(Shuffle); 4150 cast<Instruction>(PhiPart)->eraseFromParent(); 4151 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 4152 Incoming = PreviousPart; 4153 } 4154 4155 // Fix the latch value of the new recurrence in the vector loop. 4156 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4157 4158 // Extract the last vector element in the middle block. This will be the 4159 // initial value for the recurrence when jumping to the scalar loop. 4160 auto *ExtractForScalar = Incoming; 4161 if (VF.isVector()) { 4162 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4163 ExtractForScalar = Builder.CreateExtractElement( 4164 ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1), 4165 "vector.recur.extract"); 4166 } 4167 // Extract the second last element in the middle block if the 4168 // Phi is used outside the loop. We need to extract the phi itself 4169 // and not the last element (the phi update in the current iteration). This 4170 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4171 // when the scalar loop is not run at all. 
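// In the VF = 4 example above, the scalar phi s1 equals a[i - 1], so its value in the final iteration is the second last element of v2; hence the extract of lane VF - 2 below.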
4172 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4173 if (VF.isVector()) 4174 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4175 Incoming, Builder.getInt32(VF.getKnownMinValue() - 2), 4176 "vector.recur.extract.for.phi"); 4177 // When the loop is unrolled without vectorizing, initialize 4178 // ExtractForPhiUsedOutsideLoop with the part just prior to the last unrolled 4179 // part of `Incoming`. This is analogous to the vectorized case above: 4180 // extracting the second last element when VF > 1. 4181 else if (UF > 1) 4182 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 4183 4184 // Fix the initial value of the original recurrence in the scalar loop. 4185 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4186 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4187 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4188 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4189 Start->addIncoming(Incoming, BB); 4190 } 4191 4192 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4193 Phi->setName("scalar.recur"); 4194 4195 // Finally, fix users of the recurrence outside the loop. The users will need 4196 // either the last value of the scalar recurrence or the last value of the 4197 // vector recurrence we extracted in the middle block. Since the loop is in 4198 // LCSSA form, we just need to find all the phi nodes for the original scalar 4199 // recurrence in the exit block, and then add an edge for the middle block. 4200 // Note that LCSSA does not imply single entry when the original scalar loop 4201 // had multiple exiting edges (as we always run the last iteration in the 4202 // scalar epilogue); in that case, the exiting path through middle will be 4203 // dynamically dead and the value picked for the phi doesn't matter. 4204 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4205 if (any_of(LCSSAPhi.incoming_values(), 4206 [Phi](Value *V) { return V == Phi; })) 4207 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4208 } 4209 4210 void InnerLoopVectorizer::fixReduction(PHINode *Phi) { 4211 // Get its reduction variable descriptor. 4212 assert(Legal->isReductionVariable(Phi) && 4213 "Unable to find the reduction variable"); 4214 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4215 4216 RecurKind RK = RdxDesc.getRecurrenceKind(); 4217 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4218 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4219 setDebugLocFromInst(Builder, ReductionStartValue); 4220 bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi); 4221 4222 // This is the vector-clone of the value that leaves the loop. 4223 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType(); 4224 4225 // Wrap flags are in general invalid after vectorization, clear them. 4226 clearReductionWrapFlags(RdxDesc); 4227 4228 // Fix the vector-loop phi. 4229 4230 // Reductions do not have to start at zero. They can start with 4231 // any loop invariant values.
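// For example, "int s = 42; for (...) s += a[i];" is a valid reduction: with VF = 4 the vector phi enters the loop as <42, 0, 0, 0> (the start value in lane 0 and the add identity elsewhere, set up in the first phase) and receives the vectorized loop value on the backedge, which is the edge added below.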
4232 BasicBlock *Latch = OrigLoop->getLoopLatch(); 4233 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 4234 4235 for (unsigned Part = 0; Part < UF; ++Part) { 4236 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part); 4237 Value *Val = getOrCreateVectorValue(LoopVal, Part); 4238 cast<PHINode>(VecRdxPhi) 4239 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4240 } 4241 4242 // Before each round, move the insertion point right between 4243 // the PHIs and the values we are going to write. 4244 // This allows us to write both PHINodes and the extractelement 4245 // instructions. 4246 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4247 4248 setDebugLocFromInst(Builder, LoopExitInst); 4249 4250 // If tail is folded by masking, the vector value to leave the loop should be 4251 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4252 // instead of the former. For an inloop reduction the reduction will already 4253 // be predicated, and does not need to be handled here. 4254 if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { 4255 for (unsigned Part = 0; Part < UF; ++Part) { 4256 Value *VecLoopExitInst = 4257 VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4258 Value *Sel = nullptr; 4259 for (User *U : VecLoopExitInst->users()) { 4260 if (isa<SelectInst>(U)) { 4261 assert(!Sel && "Reduction exit feeding two selects"); 4262 Sel = U; 4263 } else 4264 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4265 } 4266 assert(Sel && "Reduction exit feeds no select"); 4267 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel); 4268 4269 // If the target can create a predicated operator for the reduction at no 4270 // extra cost in the loop (for example a predicated vadd), it can be 4271 // cheaper for the select to remain in the loop than be sunk out of it, 4272 // and so use the select value for the phi instead of the old 4273 // LoopExitValue. 4274 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4275 if (PreferPredicatedReductionSelect || 4276 TTI->preferPredicatedReductionSelect( 4277 RdxDesc.getOpcode(), Phi->getType(), 4278 TargetTransformInfo::ReductionFlags())) { 4279 auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part)); 4280 VecRdxPhi->setIncomingValueForBlock( 4281 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4282 } 4283 } 4284 } 4285 4286 // If the vector reduction can be performed in a smaller type, we truncate 4287 // then extend the loop exit value to enable InstCombine to evaluate the 4288 // entire expression in the smaller type. 4289 if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) { 4290 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4291 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4292 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4293 Builder.SetInsertPoint( 4294 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4295 VectorParts RdxParts(UF); 4296 for (unsigned Part = 0; Part < UF; ++Part) { 4297 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4298 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4299 Value *Extnd = RdxDesc.isSigned() ? 
Builder.CreateSExt(Trunc, VecTy) 4300 : Builder.CreateZExt(Trunc, VecTy); 4301 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4302 UI != RdxParts[Part]->user_end();) 4303 if (*UI != Trunc) { 4304 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4305 RdxParts[Part] = Extnd; 4306 } else { 4307 ++UI; 4308 } 4309 } 4310 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4311 for (unsigned Part = 0; Part < UF; ++Part) { 4312 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4313 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 4314 } 4315 } 4316 4317 // Reduce all of the unrolled parts into a single vector. 4318 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 4319 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4320 4321 // The middle block terminator has already been assigned a DebugLoc here (the 4322 // OrigLoop's single latch terminator). We want the whole middle block to 4323 // appear to execute on this line because: (a) it is all compiler generated, 4324 // (b) these instructions are always executed after evaluating the latch 4325 // conditional branch, and (c) other passes may add new predecessors which 4326 // terminate on this line. This is the easiest way to ensure we don't 4327 // accidentally cause an extra step back into the loop while debugging. 4328 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4329 { 4330 // Floating-point operations should have some FMF to enable the reduction. 4331 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4332 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4333 for (unsigned Part = 1; Part < UF; ++Part) { 4334 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4335 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4336 ReducedPartRdx = Builder.CreateBinOp( 4337 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4338 } else { 4339 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4340 } 4341 } 4342 } 4343 4344 // Create the reduction after the loop. Note that inloop reductions create the 4345 // target reduction in the loop using a Reduction recipe. 4346 if (VF.isVector() && !IsInLoopReductionPhi) { 4347 ReducedPartRdx = 4348 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4349 // If the reduction can be performed in a smaller type, we need to extend 4350 // the reduction to the wider type before we branch to the original loop. 4351 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4352 ReducedPartRdx = 4353 RdxDesc.isSigned() 4354 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4355 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4356 } 4357 4358 // Create a phi node that merges control-flow from the backedge-taken check 4359 // block and the middle block. 4360 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4361 LoopScalarPreHeader->getTerminator()); 4362 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4363 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4364 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4365 4366 // Now, we need to fix the users of the reduction variable 4367 // inside and outside of the scalar remainder loop. 4368 4369 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4370 // in the exit blocks. See comment on analogous loop in 4371 // fixFirstOrderRecurrence for a more complete explanation of the logic.
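// For example, if "sum" is used after the loop, the exit block holds an LCSSA phi like "sum.lcssa = phi [ sum, loop ]"; the reduced value computed above becomes its incoming value from the middle block.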
4372 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4373 if (any_of(LCSSAPhi.incoming_values(), 4374 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4375 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4376 4377 // Fix the scalar loop reduction variable with the incoming reduction sum 4378 // from the vector body and from the backedge value. 4379 int IncomingEdgeBlockIdx = 4380 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4381 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4382 // Pick the other block. 4383 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4384 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4385 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4386 } 4387 4388 void InnerLoopVectorizer::clearReductionWrapFlags( 4389 RecurrenceDescriptor &RdxDesc) { 4390 RecurKind RK = RdxDesc.getRecurrenceKind(); 4391 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4392 return; 4393 4394 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4395 assert(LoopExitInstr && "null loop exit instruction"); 4396 SmallVector<Instruction *, 8> Worklist; 4397 SmallPtrSet<Instruction *, 8> Visited; 4398 Worklist.push_back(LoopExitInstr); 4399 Visited.insert(LoopExitInstr); 4400 4401 while (!Worklist.empty()) { 4402 Instruction *Cur = Worklist.pop_back_val(); 4403 if (isa<OverflowingBinaryOperator>(Cur)) 4404 for (unsigned Part = 0; Part < UF; ++Part) { 4405 Value *V = getOrCreateVectorValue(Cur, Part); 4406 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4407 } 4408 4409 for (User *U : Cur->users()) { 4410 Instruction *UI = cast<Instruction>(U); 4411 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4412 Visited.insert(UI).second) 4413 Worklist.push_back(UI); 4414 } 4415 } 4416 } 4417 4418 void InnerLoopVectorizer::fixLCSSAPHIs() { 4419 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4420 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4421 // Some phis were already hand-updated by the reduction and recurrence 4422 // code above, leave them alone. 4423 continue; 4424 4425 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4426 // Non-instruction incoming values will have only one value. 4427 unsigned LastLane = 0; 4428 if (isa<Instruction>(IncomingValue)) 4429 LastLane = Cost->isUniformAfterVectorization( 4430 cast<Instruction>(IncomingValue), VF) 4431 ? 0 4432 : VF.getKnownMinValue() - 1; 4433 assert((!VF.isScalable() || LastLane == 0) && 4434 "scalable vectors don't support non-uniform scalars yet"); 4435 // Can be a loop invariant incoming value or the last scalar value to be 4436 // extracted from the vectorized loop. 4437 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4438 Value *lastIncomingValue = 4439 getOrCreateScalarValue(IncomingValue, VPIteration(UF - 1, LastLane)); 4440 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4441 } 4442 } 4443 4444 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4445 // The basic block and loop containing the predicated instruction. 4446 auto *PredBB = PredInst->getParent(); 4447 auto *VectorLoop = LI->getLoopFor(PredBB); 4448 4449 // Initialize a worklist with the operands of the predicated instruction. 4450 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4451 4452 // Holds instructions that we need to analyze again. An instruction may be 4453 // reanalyzed if we don't yet know if we can sink it or not.
4454 SmallVector<Instruction *, 8> InstsToReanalyze; 4455 4456 // Returns true if a given use occurs in the predicated block. Phi nodes use 4457 // their operands in their corresponding predecessor blocks. 4458 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4459 auto *I = cast<Instruction>(U.getUser()); 4460 BasicBlock *BB = I->getParent(); 4461 if (auto *Phi = dyn_cast<PHINode>(I)) 4462 BB = Phi->getIncomingBlock( 4463 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4464 return BB == PredBB; 4465 }; 4466 4467 // Iteratively sink the scalarized operands of the predicated instruction 4468 // into the block we created for it. When an instruction is sunk, its 4469 // operands are then added to the worklist. The algorithm ends when a pass 4470 // through the worklist doesn't sink a single instruction. 4471 bool Changed; 4472 do { 4473 // Add the instructions that need to be reanalyzed to the worklist, and 4474 // reset the changed indicator. 4475 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4476 InstsToReanalyze.clear(); 4477 Changed = false; 4478 4479 while (!Worklist.empty()) { 4480 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4481 4482 // We can't sink an instruction if it is a phi node, is already in the 4483 // predicated block, is not in the loop, or may have side effects. 4484 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4485 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4486 continue; 4487 4488 // It's legal to sink the instruction if all its uses occur in the 4489 // predicated block. Otherwise, there's nothing to do yet, and we may 4490 // need to reanalyze the instruction. 4491 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4492 InstsToReanalyze.push_back(I); 4493 continue; 4494 } 4495 4496 // Move the instruction to the beginning of the predicated block, and add 4497 // its operands to the worklist. 4498 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4499 Worklist.insert(I->op_begin(), I->op_end()); 4500 4501 // The sinking may have enabled other instructions to be sunk, so we will 4502 // need to iterate. 4503 Changed = true; 4504 } 4505 } while (Changed); 4506 } 4507 4508 void InnerLoopVectorizer::fixNonInductionPHIs() { 4509 for (PHINode *OrigPhi : OrigPHIsToFix) { 4510 PHINode *NewPhi = 4511 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 4512 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4513 4514 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4515 predecessors(OrigPhi->getParent())); 4516 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4517 predecessors(NewPhi->getParent())); 4518 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4519 "Scalar and Vector BB should have the same number of predecessors"); 4520 4521 // The insertion point in Builder may be invalidated by the time we get 4522 // here. Force the Builder insertion point to something valid so that we do 4523 // not run into issues during insertion point restore in 4524 // getOrCreateVectorValue calls below. 4525 Builder.SetInsertPoint(NewPhi); 4526 4527 // The predecessor order is preserved and we can rely on mapping between 4528 // scalar and vector block predecessors. 4529 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4530 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4531 4532 // When looking up the new scalar/vector values to fix up, use incoming 4533 // values from original phi.
4534 Value *ScIncV = 4535 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4536 4537 // Scalar incoming value may need a broadcast 4538 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4539 NewPhi->addIncoming(NewIncV, NewPredBB); 4540 } 4541 } 4542 } 4543 4544 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4545 VPUser &Operands, unsigned UF, 4546 ElementCount VF, bool IsPtrLoopInvariant, 4547 SmallBitVector &IsIndexLoopInvariant, 4548 VPTransformState &State) { 4549 // Construct a vector GEP by widening the operands of the scalar GEP as 4550 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4551 // results in a vector of pointers when at least one operand of the GEP 4552 // is vector-typed. Thus, to keep the representation compact, we only use 4553 // vector-typed operands for loop-varying values. 4554 4555 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4556 // If we are vectorizing, but the GEP has only loop-invariant operands, 4557 // the GEP we build (by only using vector-typed operands for 4558 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4559 // produce a vector of pointers, we need to either arbitrarily pick an 4560 // operand to broadcast, or broadcast a clone of the original GEP. 4561 // Here, we broadcast a clone of the original. 4562 // 4563 // TODO: If at some point we decide to scalarize instructions having 4564 // loop-invariant operands, this special case will no longer be 4565 // required. We would add the scalarization decision to 4566 // collectLoopScalars() and teach getVectorValue() to broadcast 4567 // the lane-zero scalar value. 4568 auto *Clone = Builder.Insert(GEP->clone()); 4569 for (unsigned Part = 0; Part < UF; ++Part) { 4570 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4571 State.set(VPDef, GEP, EntryPart, Part); 4572 addMetadata(EntryPart, GEP); 4573 } 4574 } else { 4575 // If the GEP has at least one loop-varying operand, we are sure to 4576 // produce a vector of pointers. But if we are only unrolling, we want 4577 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4578 // produce with the code below will be scalar (if VF == 1) or vector 4579 // (otherwise). Note that for the unroll-only case, we still maintain 4580 // values in the vector mapping with initVector, as we do for other 4581 // instructions. 4582 for (unsigned Part = 0; Part < UF; ++Part) { 4583 // The pointer operand of the new GEP. If it's loop-invariant, we 4584 // won't broadcast it. 4585 auto *Ptr = IsPtrLoopInvariant 4586 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4587 : State.get(Operands.getOperand(0), Part); 4588 4589 // Collect all the indices for the new GEP. If any index is 4590 // loop-invariant, we won't broadcast it. 4591 SmallVector<Value *, 4> Indices; 4592 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4593 VPValue *Operand = Operands.getOperand(I); 4594 if (IsIndexLoopInvariant[I - 1]) 4595 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4596 else 4597 Indices.push_back(State.get(Operand, Part)); 4598 } 4599 4600 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4601 // but it should be a vector, otherwise. 4602 auto *NewGEP = 4603 GEP->isInBounds() 4604 ? 
Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4605 Indices) 4606 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4607 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4608 "NewGEP is not a pointer vector"); 4609 State.set(VPDef, GEP, NewGEP, Part); 4610 addMetadata(NewGEP, GEP); 4611 } 4612 } 4613 } 4614 4615 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4616 RecurrenceDescriptor *RdxDesc, 4617 Value *StartV, unsigned UF, 4618 ElementCount VF) { 4619 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4620 PHINode *P = cast<PHINode>(PN); 4621 if (EnableVPlanNativePath) { 4622 // Currently we enter here in the VPlan-native path for non-induction 4623 // PHIs where all control flow is uniform. We simply widen these PHIs. 4624 // Create a vector phi with no operands - the vector phi operands will be 4625 // set at the end of vector code generation. 4626 Type *VecTy = 4627 (VF.isScalar()) ? PN->getType() : VectorType::get(PN->getType(), VF); 4628 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4629 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 4630 OrigPHIsToFix.push_back(P); 4631 4632 return; 4633 } 4634 4635 assert(PN->getParent() == OrigLoop->getHeader() && 4636 "Non-header phis should have been handled elsewhere"); 4637 4638 // In order to support recurrences we need to be able to vectorize Phi nodes. 4639 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4640 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4641 // this value when we vectorize all of the instructions that use the PHI. 4642 if (RdxDesc || Legal->isFirstOrderRecurrence(P)) { 4643 Value *Iden = nullptr; 4644 bool ScalarPHI = 4645 (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN)); 4646 Type *VecTy = 4647 ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF); 4648 4649 if (RdxDesc) { 4650 assert(Legal->isReductionVariable(P) && StartV && 4651 "RdxDesc should only be set for reduction variables; in that case " 4652 "a StartV is also required"); 4653 RecurKind RK = RdxDesc->getRecurrenceKind(); 4654 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) { 4655 // MinMax reductions have the start value as their identity. 4656 if (ScalarPHI) { 4657 Iden = StartV; 4658 } else { 4659 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4660 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4661 StartV = Iden = Builder.CreateVectorSplat(VF, StartV, "minmax.ident"); 4662 } 4663 } else { 4664 Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( 4665 RK, VecTy->getScalarType()); 4666 Iden = IdenC; 4667 4668 if (!ScalarPHI) { 4669 Iden = ConstantVector::getSplat(VF, IdenC); 4670 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4671 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4672 Constant *Zero = Builder.getInt32(0); 4673 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 4674 } 4675 } 4676 } 4677 4678 for (unsigned Part = 0; Part < UF; ++Part) { 4679 // This is phase one of vectorizing PHIs. 4680 Value *EntryPart = PHINode::Create( 4681 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4682 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4683 if (StartV) { 4684 // Make sure to add the reduction start value only to the 4685 // first unroll part. 4686 Value *StartVal = (Part == 0) ?
StartV : Iden; 4687 cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); 4688 } 4689 } 4690 return; 4691 } 4692 4693 assert(!Legal->isReductionVariable(P) && 4694 "reductions should be handled above"); 4695 4696 setDebugLocFromInst(Builder, P); 4697 4698 // This PHINode must be an induction variable. 4699 // Make sure that we know about it. 4700 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4701 4702 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4703 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4704 4705 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4706 // which can be found from the original scalar operations. 4707 switch (II.getKind()) { 4708 case InductionDescriptor::IK_NoInduction: 4709 llvm_unreachable("Unknown induction"); 4710 case InductionDescriptor::IK_IntInduction: 4711 case InductionDescriptor::IK_FpInduction: 4712 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4713 case InductionDescriptor::IK_PtrInduction: { 4714 // Handle the pointer induction variable case. 4715 assert(P->getType()->isPointerTy() && "Unexpected type."); 4716 4717 if (Cost->isScalarAfterVectorization(P, VF)) { 4718 // This is the normalized GEP that starts counting at zero. 4719 Value *PtrInd = 4720 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4721 // Determine the number of scalars we need to generate for each unroll 4722 // iteration. If the instruction is uniform, we only need to generate the 4723 // first lane. Otherwise, we generate all VF values. 4724 unsigned Lanes = 4725 Cost->isUniformAfterVectorization(P, VF) ? 1 : VF.getKnownMinValue(); 4726 for (unsigned Part = 0; Part < UF; ++Part) { 4727 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4728 Constant *Idx = ConstantInt::get(PtrInd->getType(), 4729 Lane + Part * VF.getKnownMinValue()); 4730 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4731 Value *SclrGep = 4732 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4733 SclrGep->setName("next.gep"); 4734 VectorLoopValueMap.setScalarValue(P, VPIteration(Part, Lane), 4735 SclrGep); 4736 } 4737 } 4738 return; 4739 } 4740 assert(isa<SCEVConstant>(II.getStep()) && 4741 "Induction step not a SCEV constant!"); 4742 Type *PhiType = II.getStep()->getType(); 4743 4744 // Build a pointer phi 4745 Value *ScalarStartValue = II.getStartValue(); 4746 Type *ScStValueType = ScalarStartValue->getType(); 4747 PHINode *NewPointerPhi = 4748 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4749 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4750 4751 // A pointer induction, performed by using a gep 4752 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4753 Instruction *InductionLoc = LoopLatch->getTerminator(); 4754 const SCEV *ScalarStep = II.getStep(); 4755 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4756 Value *ScalarStepValue = 4757 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4758 Value *InductionGEP = GetElementPtrInst::Create( 4759 ScStValueType->getPointerElementType(), NewPointerPhi, 4760 Builder.CreateMul( 4761 ScalarStepValue, 4762 ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)), 4763 "ptr.ind", InductionLoc); 4764 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4765 4766 // Create UF many actual address geps that use the pointer 4767 // phi as base and a vectorized version of the step value 4768 // (<step*0, ..., step*N>) as offset. 
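// For example, with VF = 4 and UF = 2, part 0 uses the offset vector <0, step, 2*step, 3*step> and part 1 uses <4*step, 5*step, 6*step, 7*step>, since lane Lane of part Part addresses element Part * VF + Lane.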
4769 for (unsigned Part = 0; Part < UF; ++Part) { 4770 SmallVector<Constant *, 8> Indices; 4771 // Create a vector of consecutive numbers from zero to VF. 4772 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 4773 Indices.push_back( 4774 ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue())); 4775 Constant *StartOffset = ConstantVector::get(Indices); 4776 4777 Value *GEP = Builder.CreateGEP( 4778 ScStValueType->getPointerElementType(), NewPointerPhi, 4779 Builder.CreateMul( 4780 StartOffset, 4781 Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue), 4782 "vector.gep")); 4783 VectorLoopValueMap.setVectorValue(P, Part, GEP); 4784 } 4785 } 4786 } 4787 } 4788 4789 /// A helper function for checking whether an integer division-related 4790 /// instruction may divide by zero (in which case it must be predicated if 4791 /// executed conditionally in the scalar code). 4792 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4793 /// Non-zero divisors that are not compile-time constants will not be 4794 /// converted into multiplication, so we will still end up scalarizing 4795 /// the division, but can do so without predication. 4796 static bool mayDivideByZero(Instruction &I) { 4797 assert((I.getOpcode() == Instruction::UDiv || 4798 I.getOpcode() == Instruction::SDiv || 4799 I.getOpcode() == Instruction::URem || 4800 I.getOpcode() == Instruction::SRem) && 4801 "Unexpected instruction"); 4802 Value *Divisor = I.getOperand(1); 4803 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4804 return !CInt || CInt->isZero(); 4805 } 4806 4807 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4808 VPUser &User, 4809 VPTransformState &State) { 4810 switch (I.getOpcode()) { 4811 case Instruction::Call: 4812 case Instruction::Br: 4813 case Instruction::PHI: 4814 case Instruction::GetElementPtr: 4815 case Instruction::Select: 4816 llvm_unreachable("This instruction is handled by a different recipe."); 4817 case Instruction::UDiv: 4818 case Instruction::SDiv: 4819 case Instruction::SRem: 4820 case Instruction::URem: 4821 case Instruction::Add: 4822 case Instruction::FAdd: 4823 case Instruction::Sub: 4824 case Instruction::FSub: 4825 case Instruction::FNeg: 4826 case Instruction::Mul: 4827 case Instruction::FMul: 4828 case Instruction::FDiv: 4829 case Instruction::FRem: 4830 case Instruction::Shl: 4831 case Instruction::LShr: 4832 case Instruction::AShr: 4833 case Instruction::And: 4834 case Instruction::Or: 4835 case Instruction::Xor: { 4836 // Just widen unops and binops. 4837 setDebugLocFromInst(Builder, &I); 4838 4839 for (unsigned Part = 0; Part < UF; ++Part) { 4840 SmallVector<Value *, 2> Ops; 4841 for (VPValue *VPOp : User.operands()) 4842 Ops.push_back(State.get(VPOp, Part)); 4843 4844 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4845 4846 if (auto *VecOp = dyn_cast<Instruction>(V)) 4847 VecOp->copyIRFlags(&I); 4848 4849 // Use this vector value for all users of the original instruction. 4850 State.set(Def, &I, V, Part); 4851 addMetadata(V, &I); 4852 } 4853 4854 break; 4855 } 4856 case Instruction::ICmp: 4857 case Instruction::FCmp: { 4858 // Widen compares. Generate vector compares. 4859 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4860 auto *Cmp = cast<CmpInst>(&I); 4861 setDebugLocFromInst(Builder, Cmp); 4862 for (unsigned Part = 0; Part < UF; ++Part) { 4863 Value *A = State.get(User.getOperand(0), Part); 4864 Value *B = State.get(User.getOperand(1), Part); 4865 Value *C = nullptr; 4866 if (FCmp) { 4867 // Propagate fast math flags.
IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4869 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4870 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4871 } else { 4872 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4873 } 4874 State.set(Def, &I, C, Part); 4875 addMetadata(C, &I); 4876 } 4877 4878 break; 4879 } 4880 4881 case Instruction::ZExt: 4882 case Instruction::SExt: 4883 case Instruction::FPToUI: 4884 case Instruction::FPToSI: 4885 case Instruction::FPExt: 4886 case Instruction::PtrToInt: 4887 case Instruction::IntToPtr: 4888 case Instruction::SIToFP: 4889 case Instruction::UIToFP: 4890 case Instruction::Trunc: 4891 case Instruction::FPTrunc: 4892 case Instruction::BitCast: { 4893 auto *CI = cast<CastInst>(&I); 4894 setDebugLocFromInst(Builder, CI); 4895 4896 // Vectorize casts. 4897 Type *DestTy = 4898 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4899 4900 for (unsigned Part = 0; Part < UF; ++Part) { 4901 Value *A = State.get(User.getOperand(0), Part); 4902 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4903 State.set(Def, &I, Cast, Part); 4904 addMetadata(Cast, &I); 4905 } 4906 break; 4907 } 4908 default: 4909 // This instruction is not vectorized by simple widening. 4910 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4911 llvm_unreachable("Unhandled instruction!"); 4912 } // end of switch. 4913 } 4914 4915 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4916 VPUser &ArgOperands, 4917 VPTransformState &State) { 4918 assert(!isa<DbgInfoIntrinsic>(I) && 4919 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4920 setDebugLocFromInst(Builder, &I); 4921 4922 Module *M = I.getParent()->getParent()->getParent(); 4923 auto *CI = cast<CallInst>(&I); 4924 4925 SmallVector<Type *, 4> Tys; 4926 for (Value *ArgOperand : CI->arg_operands()) 4927 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4928 4929 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4930 4931 // The flag indicates whether we should use an intrinsic or a plain call for 4932 // the vectorized version of the instruction, i.e. whether performing the 4933 // intrinsic call is more beneficial than performing the lib call. 4934 bool NeedToScalarize = false; 4935 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4936 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4937 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4938 assert((UseVectorIntrinsic || !NeedToScalarize) && 4939 "Instruction should be scalarized elsewhere."); 4940 assert(IntrinsicCost.isValid() && CallCost.isValid() && 4941 "Cannot have invalid costs while widening"); 4942 4943 for (unsigned Part = 0; Part < UF; ++Part) { 4944 SmallVector<Value *, 4> Args; 4945 for (auto &I : enumerate(ArgOperands.operands())) { 4946 // Some intrinsics have a scalar argument - don't replace it with a 4947 // vector. 4948 Value *Arg; 4949 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4950 Arg = State.get(I.value(), Part); 4951 else 4952 Arg = State.get(I.value(), VPIteration(0, 0)); 4953 Args.push_back(Arg); 4954 } 4955 4956 Function *VectorF; 4957 if (UseVectorIntrinsic) { 4958 // Use vector version of the intrinsic.
4959 Type *TysForDecl[] = {CI->getType()}; 4960 if (VF.isVector()) { 4961 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 4962 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4963 } 4964 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4965 assert(VectorF && "Can't retrieve vector intrinsic."); 4966 } else { 4967 // Use vector version of the function call. 4968 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4969 #ifndef NDEBUG 4970 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4971 "Can't create vector function."); 4972 #endif 4973 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4974 } 4975 SmallVector<OperandBundleDef, 1> OpBundles; 4976 CI->getOperandBundlesAsDefs(OpBundles); 4977 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4978 4979 if (isa<FPMathOperator>(V)) 4980 V->copyFastMathFlags(CI); 4981 4982 State.set(Def, &I, V, Part); 4983 addMetadata(V, &I); 4984 } 4985 } 4986 4987 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 4988 VPUser &Operands, 4989 bool InvariantCond, 4990 VPTransformState &State) { 4991 setDebugLocFromInst(Builder, &I); 4992 4993 // The condition can be loop invariant but still defined inside the 4994 // loop. This means that we can't just use the original 'cond' value. 4995 // We have to take the 'vectorized' value and pick the first lane. 4996 // Instcombine will make this a no-op. 4997 auto *InvarCond = InvariantCond 4998 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4999 : nullptr; 5000 5001 for (unsigned Part = 0; Part < UF; ++Part) { 5002 Value *Cond = 5003 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5004 Value *Op0 = State.get(Operands.getOperand(1), Part); 5005 Value *Op1 = State.get(Operands.getOperand(2), Part); 5006 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5007 State.set(VPDef, &I, Sel, Part); 5008 addMetadata(Sel, &I); 5009 } 5010 } 5011 5012 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5013 // We should not collect Scalars more than once per VF. Right now, this 5014 // function is called from collectUniformsAndScalars(), which already does 5015 // this check. Collecting Scalars for VF=1 does not make any sense. 5016 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5017 "This function should not be visited twice for the same VF"); 5018 5019 SmallSetVector<Instruction *, 8> Worklist; 5020 5021 // These sets are used to seed the analysis with pointers used by memory 5022 // accesses that will remain scalar. 5023 SmallSetVector<Instruction *, 8> ScalarPtrs; 5024 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5025 auto *Latch = TheLoop->getLoopLatch(); 5026 5027 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5028 // The pointer operands of loads and stores will be scalar as long as the 5029 // memory access is not a gather or scatter operation. The value operand of a 5030 // store will remain scalar if the store is scalarized. 
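// For example, a consecutive load widened into a single vector load still takes a scalar pointer (the address of lane 0), whereas a gather consumes a vector of pointers, so its pointer operand cannot remain scalar.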
5031 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5032 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5033 assert(WideningDecision != CM_Unknown && 5034 "Widening decision should be ready at this moment"); 5035 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5036 if (Ptr == Store->getValueOperand()) 5037 return WideningDecision == CM_Scalarize; 5038 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5039 "Ptr is neither a value nor a pointer operand"); 5040 return WideningDecision != CM_GatherScatter; 5041 }; 5042 5043 // A helper that returns true if the given value is a bitcast or 5044 // getelementptr instruction contained in the loop. 5045 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5046 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5047 isa<GetElementPtrInst>(V)) && 5048 !TheLoop->isLoopInvariant(V); 5049 }; 5050 5051 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5052 if (!isa<PHINode>(Ptr) || 5053 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5054 return false; 5055 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5056 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5057 return false; 5058 return isScalarUse(MemAccess, Ptr); 5059 }; 5060 5061 // A helper that evaluates a memory access's use of a pointer. If the 5062 // pointer is actually the pointer induction of a loop, it is inserted 5063 // into Worklist. If the use will be a scalar use, and the 5064 // pointer is only used by memory accesses, we place the pointer in 5065 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5066 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5067 if (isScalarPtrInduction(MemAccess, Ptr)) { 5068 Worklist.insert(cast<Instruction>(Ptr)); 5069 Instruction *Update = cast<Instruction>( 5070 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5071 Worklist.insert(Update); 5072 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5073 << "\n"); 5074 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update 5075 << "\n"); 5076 return; 5077 } 5078 // We only care about bitcast and getelementptr instructions contained in 5079 // the loop. 5080 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5081 return; 5082 5083 // If the pointer has already been identified as scalar (e.g., if it was 5084 // also identified as uniform), there's nothing to do. 5085 auto *I = cast<Instruction>(Ptr); 5086 if (Worklist.count(I)) 5087 return; 5088 5089 // If the use of the pointer will be a scalar use, and all users of the 5090 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5091 // place the pointer in PossibleNonScalarPtrs. 5092 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5093 return isa<LoadInst>(U) || isa<StoreInst>(U); 5094 })) 5095 ScalarPtrs.insert(I); 5096 else 5097 PossibleNonScalarPtrs.insert(I); 5098 }; 5099 5100 // We seed the scalars analysis with two classes of instructions: (1) 5101 // instructions marked uniform-after-vectorization and (2) bitcast, 5102 // getelementptr and (pointer) phi instructions used by memory accesses 5103 // requiring a scalar use. 5104 // 5105 // (1) Add to the worklist all instructions that have been identified as 5106 // uniform-after-vectorization.
5107 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5108 5109 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5110 // memory accesses requiring a scalar use. The pointer operands of loads and 5111 // stores will be scalar as long as the memory access is not a gather or 5112 // scatter operation. The value operand of a store will remain scalar if the 5113 // store is scalarized. 5114 for (auto *BB : TheLoop->blocks()) 5115 for (auto &I : *BB) { 5116 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5117 evaluatePtrUse(Load, Load->getPointerOperand()); 5118 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5119 evaluatePtrUse(Store, Store->getPointerOperand()); 5120 evaluatePtrUse(Store, Store->getValueOperand()); 5121 } 5122 } 5123 for (auto *I : ScalarPtrs) 5124 if (!PossibleNonScalarPtrs.count(I)) { 5125 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5126 Worklist.insert(I); 5127 } 5128 5129 // Insert the forced scalars. 5130 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5131 // induction variable when the PHI user is scalarized. 5132 auto ForcedScalar = ForcedScalars.find(VF); 5133 if (ForcedScalar != ForcedScalars.end()) 5134 for (auto *I : ForcedScalar->second) 5135 Worklist.insert(I); 5136 5137 // Expand the worklist by looking through any bitcasts and getelementptr 5138 // instructions we've already identified as scalar. This is similar to the 5139 // expansion step in collectLoopUniforms(); however, here we're only 5140 // expanding to include additional bitcasts and getelementptr instructions. 5141 unsigned Idx = 0; 5142 while (Idx != Worklist.size()) { 5143 Instruction *Dst = Worklist[Idx++]; 5144 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5145 continue; 5146 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5147 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5148 auto *J = cast<Instruction>(U); 5149 return !TheLoop->contains(J) || Worklist.count(J) || 5150 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5151 isScalarUse(J, Src)); 5152 })) { 5153 Worklist.insert(Src); 5154 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5155 } 5156 } 5157 5158 // An induction variable will remain scalar if all users of the induction 5159 // variable and induction variable update remain scalar. 5160 for (auto &Induction : Legal->getInductionVars()) { 5161 auto *Ind = Induction.first; 5162 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5163 5164 // If tail-folding is applied, the primary induction variable will be used 5165 // to feed a vector compare. 5166 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5167 continue; 5168 5169 // Determine if all users of the induction variable are scalar after 5170 // vectorization. 5171 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5172 auto *I = cast<Instruction>(U); 5173 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5174 }); 5175 if (!ScalarInd) 5176 continue; 5177 5178 // Determine if all users of the induction variable update instruction are 5179 // scalar after vectorization. 5180 auto ScalarIndUpdate = 5181 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5182 auto *I = cast<Instruction>(U); 5183 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5184 }); 5185 if (!ScalarIndUpdate) 5186 continue; 5187 5188 // The induction variable and its update instruction will remain scalar.
5189 Worklist.insert(Ind); 5190 Worklist.insert(IndUpdate); 5191 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5192 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5193 << "\n"); 5194 } 5195 5196 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5197 } 5198 5199 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, 5200 ElementCount VF) { 5201 if (!blockNeedsPredication(I->getParent())) 5202 return false; 5203 switch (I->getOpcode()) { 5204 default: 5205 break; 5206 case Instruction::Load: 5207 case Instruction::Store: { 5208 if (!Legal->isMaskRequired(I)) 5209 return false; 5210 auto *Ptr = getLoadStorePointerOperand(I); 5211 auto *Ty = getMemInstValueType(I); 5212 // We have already decided how to vectorize this instruction, get that 5213 // result. 5214 if (VF.isVector()) { 5215 InstWidening WideningDecision = getWideningDecision(I, VF); 5216 assert(WideningDecision != CM_Unknown && 5217 "Widening decision should be ready at this moment"); 5218 return WideningDecision == CM_Scalarize; 5219 } 5220 const Align Alignment = getLoadStoreAlignment(I); 5221 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5222 isLegalMaskedGather(Ty, Alignment)) 5223 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5224 isLegalMaskedScatter(Ty, Alignment)); 5225 } 5226 case Instruction::UDiv: 5227 case Instruction::SDiv: 5228 case Instruction::SRem: 5229 case Instruction::URem: 5230 return mayDivideByZero(*I); 5231 } 5232 return false; 5233 } 5234 5235 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5236 Instruction *I, ElementCount VF) { 5237 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5238 assert(getWideningDecision(I, VF) == CM_Unknown && 5239 "Decision should not be set yet."); 5240 auto *Group = getInterleavedAccessGroup(I); 5241 assert(Group && "Must have a group."); 5242 5243 // If the instruction's allocated size doesn't equal its type size, it 5244 // requires padding and will be scalarized. 5245 auto &DL = I->getModule()->getDataLayout(); 5246 auto *ScalarTy = getMemInstValueType(I); 5247 if (hasIrregularType(ScalarTy, DL, VF)) 5248 return false; 5249 5250 // Check if masking is required. 5251 // A Group may need masking for one of two reasons: it resides in a block that 5252 // needs predication, or it was decided to use masking to deal with gaps. 5253 bool PredicatedAccessRequiresMasking = 5254 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5255 bool AccessWithGapsRequiresMasking = 5256 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5257 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 5258 return true; 5259 5260 // If masked interleaving is required, we expect that the user/target had 5261 // enabled it, because otherwise it either wouldn't have been created or 5262 // it should have been invalidated by the CostModel. 5263 assert(useMaskedInterleavedAccesses(TTI) && 5264 "Masked interleave-groups for predicated accesses are not enabled."); 5265 5266 auto *Ty = getMemInstValueType(I); 5267 const Align Alignment = getLoadStoreAlignment(I); 5268 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5269 : TTI.isLegalMaskedStore(Ty, Alignment); 5270 } 5271 5272 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5273 Instruction *I, ElementCount VF) { 5274 // Get and ensure we have a valid memory instruction.
5275 LoadInst *LI = dyn_cast<LoadInst>(I); 5276 StoreInst *SI = dyn_cast<StoreInst>(I); 5277 assert((LI || SI) && "Invalid memory instruction"); 5278 5279 auto *Ptr = getLoadStorePointerOperand(I); 5280 5281 // First of all, in order to be widened the pointer must be consecutive. 5282 if (!Legal->isConsecutivePtr(Ptr)) 5283 return false; 5284 5285 // If the instruction is a store located in a predicated block, it will be 5286 // scalarized. 5287 if (isScalarWithPredication(I)) 5288 return false; 5289 5290 // If the instruction's allocated size doesn't equal its type size, it 5291 // requires padding and will be scalarized. 5292 auto &DL = I->getModule()->getDataLayout(); 5293 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5294 if (hasIrregularType(ScalarTy, DL, VF)) 5295 return false; 5296 5297 return true; 5298 } 5299 5300 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5301 // We should not collect Uniforms more than once per VF. Right now, 5302 // this function is called from collectUniformsAndScalars(), which 5303 // already does this check. Collecting Uniforms for VF=1 does not make any 5304 // sense. 5305 5306 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5307 "This function should not be visited twice for the same VF"); 5308 5309 // Create the entry for this VF up front so that, even if we find no uniform 5310 // value, we will not analyze this VF again; Uniforms.count(VF) will return 1. 5311 Uniforms[VF].clear(); 5312 5313 // We now know that the loop is vectorizable! 5314 // Collect instructions inside the loop that will remain uniform after 5315 // vectorization. 5316 5317 // Global values, params and instructions outside of current loop are out of 5318 // scope. 5319 auto isOutOfScope = [&](Value *V) -> bool { 5320 Instruction *I = dyn_cast<Instruction>(V); 5321 return (!I || !TheLoop->contains(I)); 5322 }; 5323 5324 SetVector<Instruction *> Worklist; 5325 BasicBlock *Latch = TheLoop->getLoopLatch(); 5326 5327 // Instructions that are scalar with predication must not be considered 5328 // uniform after vectorization, because that would create an erroneous 5329 // replicating region where only a single instance out of VF should be formed. 5330 // TODO: optimize such seldom cases if found important, see PR40816. 5331 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5332 if (isOutOfScope(I)) { 5333 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5334 << *I << "\n"); 5335 return; 5336 } 5337 if (isScalarWithPredication(I, VF)) { 5338 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5339 << *I << "\n"); 5340 return; 5341 } 5342 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5343 Worklist.insert(I); 5344 }; 5345 5346 // Start with the conditional branch. If the branch condition is an 5347 // instruction contained in the loop that is only used by the branch, it is 5348 // uniform. 5349 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5350 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5351 addToWorklistIfAllowed(Cmp); 5352 5353 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5354 InstWidening WideningDecision = getWideningDecision(I, VF); 5355 assert(WideningDecision != CM_Unknown && 5356 "Widening decision should be ready at this moment"); 5357 5358 // A uniform memory op is itself uniform. We exclude uniform stores 5359 // here as they demand the last lane, not the first one.
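// For example, a load from a loop-invariant address, such as "x = *p" inside the loop, reads the same location on every iteration, so one scalar load per vector iteration suffices and the instruction is uniform.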
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g., this is
  // not the usual meaning of uniform).
  SmallPtrSet<Value *, 8> HasUniformUse;

  // Scan the loop for instructions which are either a) known to have only
  // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }
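  // For example, a GEP whose only users are consecutive loads that will be
  // widened (decision CM_Widen) ends up in the worklist above: every use
  // demands only its lane-0 value, so the GEP remains scalar and uniform.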
  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}
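// For instance, a loop reading from and writing through two pointer
// arguments that may alias can only be vectorized under a runtime overlap
// check; when optimizing for size we refuse to version the loop for such
// checks, which is what the paths above report.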
Optional<ElementCount>
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since it's still likely to be
    // dynamically uniform if the target can skip.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required; they are not good when optimizing
    // for size.
    if (runtimeChecksRequired())
      return None;

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return None;
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
  assert(!MaxVF.isScalable() &&
         "Scalable vectors do not yet support tail folding");
  assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
         "MaxVF must be a power of 2");
  unsigned MaxVFtimesIC =
      UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
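  // For example, with a constant trip count of 16 and MaxVFtimesIC == 8, the
  // remainder computed below is 0, so MaxVF is accepted without tail folding;
  // with a trip count of 10 the remainder is 2, and we instead try to fold
  // the tail by masking.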
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
  const SCEV *Rem = SE->getURemExpr(
      SE->applyLoopGuards(ExitCount, TheLoop),
      SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
  if (Rem->isZero()) {
    // Accept MaxVF if we do not have a tail.
    LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
    return MaxVF;
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxVF;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxVF;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return None;
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return None;
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return None;
}

ElementCount
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
                                                 ElementCount UserVF) {
  bool IgnoreScalableUserVF = UserVF.isScalable() &&
                              !TTI.supportsScalableVectors() &&
                              !ForceTargetSupportsScalableVectors;
  if (IgnoreScalableUserVF) {
    LLVM_DEBUG(
        dbgs() << "LV: Ignoring VF=" << UserVF
               << " because target does not support scalable vectors.\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "Ignoring VF=" << ore::NV("UserVF", UserVF)
             << " because target does not support scalable vectors.";
    });
  }

  // Beyond this point two scenarios are handled. If UserVF isn't specified
  // then a suitable VF is chosen. If UserVF is specified and there are
  // dependencies, check if it's legal. However, if a UserVF is specified and
  // there are no dependencies, then there's nothing to do.
  if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
      Legal->isSafeForAnyVectorWidth())
    return UserVF;

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();

  // If the user vectorization factor is legally unsafe, clamp it to a safe
  // value. Otherwise, return it as is.
  if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
    unsigned MaxSafeElements =
        PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
    ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);

    if (UserVF.isScalable()) {
      Optional<unsigned> MaxVScale = TTI.getMaxVScale();

      // Scale VF by vscale before checking if it's safe.
      MaxSafeVF = ElementCount::getScalable(
          MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);

      if (MaxSafeVF.isZero()) {
        // The dependence distance is too small to use scalable vectors; fall
        // back on fixed-width vectorization.
        LLVM_DEBUG(
            dbgs()
            << "LV: Max legal vector width too small, scalable vectorization "
               "unfeasible. Using fixed-width vectorization instead.\n");
        ORE->emit([&]() {
          return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
                                            TheLoop->getStartLoc(),
                                            TheLoop->getHeader())
                 << "Max legal vector width too small, scalable vectorization "
                 << "unfeasible. Using fixed-width vectorization instead.";
        });
        return computeFeasibleMaxVF(
            ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
      }
    }

    LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");

    if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
      return UserVF;

    LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                      << " is unsafe, clamping to max safe VF=" << MaxSafeVF
                      << ".\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe, clamping to maximum safe vectorization factor "
             << ore::NV("VectorizationFactor", MaxSafeVF);
    });
    return MaxSafeVF;
  }

  WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
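  // For example, with a 256-bit widest register and a widest type of 64 bits,
  // PowerOf2Floor(256 / 64) gives MaxVectorSize == 4; a 96-bit dependence
  // distance bound with 32-bit elements would instead give
  // PowerOf2Floor(96 / 32) == 2.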
  unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << WidestRegister << " bits.\n");

  assert(MaxVectorSize <= WidestRegister &&
         "Did not expect to pack so many elements"
         " into one vector!");
  if (MaxVectorSize == 0) {
    LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
    return ElementCount::getFixed(MaxVectorSize);
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                      << ConstTripCount << "\n");
    MaxVectorSize = ConstTripCount;
    return ElementCount::getFixed(MaxVectorSize);
  }

  unsigned MaxVF = MaxVectorSize;
  if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<ElementCount, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(ElementCount::getFixed(VS));

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i].getKnownMinValue();
        break;
      }
    }
    if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
      if (MaxVF < MinVF) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return ElementCount::getFixed(MaxVF);
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  assert(!MaxVF.isScalable() && "scalable vectors not yet supported");

  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");

  unsigned Width = 1;
  const float ScalarCost = *ExpectedCost.getValue();
  float Cost = ScalarCost;

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && MaxVF.isVector()) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    Cost = std::numeric_limits<float>::max();
  }

  for (unsigned i = 2; i <= MaxVF.getFixedValue(); i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(ElementCount::getFixed(i));
    assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
    float VectorCost = *C.first.getValue() / (float)i;
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
    if (VectorCost < ScalarCost) {
      ProfitableVFs.push_back(VectorizationFactor(
          {ElementCount::getFixed(i), (unsigned)VectorCost}));
    }

    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    Width = 1;
    Cost = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {ElementCount::getFixed(Width),
                                (unsigned)(Width * Cost)};
  return Factor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
        return Legal->isFirstOrderRecurrence(&Phi) ||
               Legal->isReductionVariable(&Phi);
      }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of the penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  return true;
}
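// As a concrete reading of the heuristic below: on a target whose maximum
// interleave factor is 1 (one that does not consider interleaving
// beneficial), epilogue vectorization is always rejected; otherwise it is
// considered profitable once the main-loop VF reaches
// EpilogueVectorizationMinVF.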
bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  if (MainLoopVF.isScalable()) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
                         "yet supported.\n");
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    if (LVP.hasPlanWithVFs(
            {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
      return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  if (!isEpilogueVectorizationProfitable(MainLoopVF))
    return Result;

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
        (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
        LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
        if (PreferInLoopReductions ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store will
      //        be vectorized. We only know this for certain after a VF has
      //        been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we can't
  // predict at this level. For example, frontend pressure (on decode or
  // fetch) due to code size, or the number and capabilities of the execution
  // ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // A dependence distance already limited the VF; interleaving would widen
  // the effective access distance beyond it, so do not interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
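  // For instance, a loop with an estimated trip count of 3 and no scalar
  // reductions is left uninterleaved by the check below, while the same loop
  // with a scalar reduction may still be interleaved when
  // InterleaveSmallLoopScalarReduction is set.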
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want a power-of-two interleave count to simplify
  // any addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case
  // IC is set to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }
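  // For example, with a known trip count of 12, VF = 4 and a target maximum
  // interleave factor of 4, the clamp below yields min(12 / 4, 4) == 3, so at
  // most three copies of the vector body are interleaved.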
For now, we choose to leave 6167 // the InterleaveCount as if vscale is '1', although if some information about 6168 // the vector is known (e.g. min vector size), we can make a better decision. 6169 if (BestKnownTC) { 6170 MaxInterleaveCount = 6171 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6172 // Make sure MaxInterleaveCount is greater than 0. 6173 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6174 } 6175 6176 assert(MaxInterleaveCount > 0 && 6177 "Maximum interleave count must be greater than 0"); 6178 6179 // Clamp the calculated IC to be between the 1 and the max interleave count 6180 // that the target and trip count allows. 6181 if (IC > MaxInterleaveCount) 6182 IC = MaxInterleaveCount; 6183 else 6184 // Make sure IC is greater than 0. 6185 IC = std::max(1u, IC); 6186 6187 assert(IC > 0 && "Interleave count must be greater than 0."); 6188 6189 // If we did not calculate the cost for VF (because the user selected the VF) 6190 // then we calculate the cost of VF here. 6191 if (LoopCost == 0) { 6192 assert(expectedCost(VF).first.isValid() && "Expected a valid cost"); 6193 LoopCost = *expectedCost(VF).first.getValue(); 6194 } 6195 6196 assert(LoopCost && "Non-zero loop cost expected"); 6197 6198 // Interleave if we vectorized this loop and there is a reduction that could 6199 // benefit from interleaving. 6200 if (VF.isVector() && HasReductions) { 6201 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6202 return IC; 6203 } 6204 6205 // Note that if we've already vectorized the loop we will have done the 6206 // runtime check and so interleaving won't require further checks. 6207 bool InterleavingRequiresRuntimePointerCheck = 6208 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6209 6210 // We want to interleave small loops in order to reduce the loop overhead and 6211 // potentially expose ILP opportunities. 6212 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6213 << "LV: IC is " << IC << '\n' 6214 << "LV: VF is " << VF << '\n'); 6215 const bool AggressivelyInterleaveReductions = 6216 TTI.enableAggressiveInterleaving(HasReductions); 6217 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6218 // We assume that the cost overhead is 1 and we use the cost model 6219 // to estimate the cost of the loop and interleave until the cost of the 6220 // loop overhead is about 5% of the cost of the loop. 6221 unsigned SmallIC = 6222 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6223 6224 // Interleave until store/load ports (estimated by max interleave count) are 6225 // saturated. 6226 unsigned NumStores = Legal->getNumStores(); 6227 unsigned NumLoads = Legal->getNumLoads(); 6228 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6229 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6230 6231 // If we have a scalar reduction (vector reductions are already dealt with 6232 // by this point), we can increase the critical path length if the loop 6233 // we're interleaving is inside another loop. Limit, by default to 2, so the 6234 // critical path only gets increased by one reduction operation. 
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal IC
      // to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
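  // As a small illustration of this bookkeeping: in a chain A -> B -> C,
  // A's interval ends just past its last use in B, so by the time C is
  // scanned A has been removed from the set of open intervals and no longer
  // counts toward the register estimate.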
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0U;
    return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked load/gather emulation was previously never allowed, while
  // a limited amount of masked store/scatter emulation was allowed.
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}
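// For example, a store guarded by `if (c[i] != 0)` in the source ends up in
// a predicated block. Scalarizing its whole single-use address chain can be
// cheaper than keeping the chain vectorized, because the scalarized chain
// executes only when the predicate holds; computePredInstDiscount below
// quantifies exactly this trade-off.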
void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better to not if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if the hacked cost is needed
        // for emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for. This behavior can be changed by allowing getScalarValue to clone
    // the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider expressions
  // that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.find(I) != ScalarCosts.end())
      continue;

    // Compute the cost of the vector instruction. Note that this cost already
    // includes the scalarization overhead of the predicated instruction.
    InstructionCost VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost of
    // the instruction as if it wasn't if-converted and instead remained in the
    // predicated block. We will scale this cost by block probability after
    // computing the scalarization overhead.
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    InstructionCost ScalarCost =
        VF.getKnownMinValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      ScalarCost +=
          VF.getKnownMinValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand can
    // be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}
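// For example, if the vector form of a chain costs 10 and its scalarized
// form costs 8 after scaling by the block probability, the accumulated
// discount is 2 (non-negative), so the caller records the chain in
// ScalarCosts and scalarizes it.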
LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(ElementCount VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block, if it is an if-else block. Thus, scale the block's
    // cost by the probability of executing it. blockNeedsPredication from
    // Legal is used so as to not include all blocks in tail-folded loops.
    if (VF.isScalar() && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE,
    const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return PSE.getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}
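// For the GEP pattern accepted above, e.g. (with illustrative names)
//   getelementptr inbounds [64 x float], [64 x float]* %A, i64 %inv, i64 %iv
// every index except the induction %iv is loop-invariant, so the returned
// SCEV lets the target estimate the per-iteration address computation cost.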
  InstructionCost Cost =
      VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  const Align Alignment = getLoadStoreAlignment(I);
  Cost += VF.getKnownMinValue() *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS, TTI::TCK_RecipThroughput);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated store, it may not be executed for each vector
  // lane. Scale the cost by the probability of executing the predicated
  // block.
  if (isPredicatedInst(I)) {
    Cost /= getReciprocalPredBlockProb();

    if (useEmulatedMaskMemRefHack(I))
      // Artificially set the cost to a value high enough to practically
      // disable vectorization with such operations.
      Cost = 3000000;
  }

  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                    ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  Value *Ptr = getLoadStorePointerOperand(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  const Align Alignment = getLoadStoreAlignment(I);
  InstructionCost Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                      CostKind);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                CostKind, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                ElementCount VF) {
  assert(Legal->isUniformMemOp(*I));

  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
                               CostKind) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
                             CostKind) +
         (isLoopInvariantStoreValue
              ? 0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}
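// For example (illustrative): storing a loop-invariant value to an invariant
// address costs a single scalar store per vector iteration, while storing a
// varying value additionally pays for extracting the last vector lane, as
// computed above.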
InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  }
  return Cost;
}

InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit for no inloop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return InstructionCost::getInvalid();
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for one of the following patterns, choosing the minimal
  // acceptable cost:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
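  // For example (illustrative): a reduction computing
  //   sum += sext(i8 %a) * sext(i8 %b)
  // can be vectorized as a single extended multiply-add reduction, whose cost
  // TTI.getExtendedAddReductionCost reports; if that cost beats the summed
  // costs of the separate ext, mul and reduction instructions, the whole
  // pattern is charged to the reduction and the rest becomes free.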
6889 Instruction *RetI = I; 6890 if ((RetI->getOpcode() == Instruction::SExt || 6891 RetI->getOpcode() == Instruction::ZExt)) { 6892 if (!RetI->hasOneUser()) 6893 return InstructionCost::getInvalid(); 6894 RetI = RetI->user_back(); 6895 } 6896 if (RetI->getOpcode() == Instruction::Mul && 6897 RetI->user_back()->getOpcode() == Instruction::Add) { 6898 if (!RetI->hasOneUser()) 6899 return InstructionCost::getInvalid(); 6900 RetI = RetI->user_back(); 6901 } 6902 6903 // Test if the found instruction is a reduction, and if not return an invalid 6904 // cost specifying the parent to use the original cost modelling. 6905 if (!InLoopReductionImmediateChains.count(RetI)) 6906 return InstructionCost::getInvalid(); 6907 6908 // Find the reduction this chain is a part of and calculate the basic cost of 6909 // the reduction on its own. 6910 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6911 Instruction *ReductionPhi = LastChain; 6912 while (!isa<PHINode>(ReductionPhi)) 6913 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6914 6915 RecurrenceDescriptor RdxDesc = 6916 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 6917 unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), 6918 VectorTy, false, CostKind); 6919 6920 // Get the operand that was not the reduction chain and match it to one of the 6921 // patterns, returning the better cost if it is found. 6922 Instruction *RedOp = RetI->getOperand(1) == LastChain 6923 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6924 : dyn_cast<Instruction>(RetI->getOperand(1)); 6925 6926 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6927 6928 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 6929 !TheLoop->isLoopInvariant(RedOp)) { 6930 bool IsUnsigned = isa<ZExtInst>(RedOp); 6931 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6932 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6933 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6934 CostKind); 6935 6936 unsigned ExtCost = 6937 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6938 TTI::CastContextHint::None, CostKind, RedOp); 6939 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6940 return I == RetI ? *RedCost.getValue() : 0; 6941 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 6942 Instruction *Mul = RedOp; 6943 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 6944 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 6945 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 6946 Op0->getOpcode() == Op1->getOpcode() && 6947 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6948 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6949 bool IsUnsigned = isa<ZExtInst>(Op0); 6950 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6951 // reduce(mul(ext, ext)) 6952 unsigned ExtCost = 6953 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 6954 TTI::CastContextHint::None, CostKind, Op0); 6955 unsigned MulCost = 6956 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6957 6958 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6959 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6960 CostKind); 6961 6962 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 6963 return I == RetI ? 
*RedCost.getValue() : 0; 6964 } else { 6965 unsigned MulCost = 6966 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6967 6968 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6969 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6970 CostKind); 6971 6972 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6973 return I == RetI ? *RedCost.getValue() : 0; 6974 } 6975 } 6976 6977 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 6978 } 6979 6980 InstructionCost 6981 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6982 ElementCount VF) { 6983 // Calculate scalar cost only. Vectorization cost should be ready at this 6984 // moment. 6985 if (VF.isScalar()) { 6986 Type *ValTy = getMemInstValueType(I); 6987 const Align Alignment = getLoadStoreAlignment(I); 6988 unsigned AS = getLoadStoreAddressSpace(I); 6989 6990 return TTI.getAddressComputationCost(ValTy) + 6991 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6992 TTI::TCK_RecipThroughput, I); 6993 } 6994 return getWideningCost(I, VF); 6995 } 6996 6997 LoopVectorizationCostModel::VectorizationCostTy 6998 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6999 ElementCount VF) { 7000 // If we know that this instruction will remain uniform, check the cost of 7001 // the scalar version. 7002 if (isUniformAfterVectorization(I, VF)) 7003 VF = ElementCount::getFixed(1); 7004 7005 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7006 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7007 7008 // Forced scalars do not have any scalarization overhead. 7009 auto ForcedScalar = ForcedScalars.find(VF); 7010 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7011 auto InstSet = ForcedScalar->second; 7012 if (InstSet.count(I)) 7013 return VectorizationCostTy( 7014 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7015 VF.getKnownMinValue()), 7016 false); 7017 } 7018 7019 Type *VectorTy; 7020 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7021 7022 bool TypeNotScalarized = 7023 VF.isVector() && VectorTy->isVectorTy() && 7024 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7025 return VectorizationCostTy(C, TypeNotScalarized); 7026 } 7027 7028 InstructionCost 7029 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7030 ElementCount VF) { 7031 7032 assert(!VF.isScalable() && 7033 "cannot compute scalarization overhead for scalable vectorization"); 7034 if (VF.isScalar()) 7035 return 0; 7036 7037 InstructionCost Cost = 0; 7038 Type *RetTy = ToVectorTy(I->getType(), VF); 7039 if (!RetTy->isVoidTy() && 7040 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7041 Cost += TTI.getScalarizationOverhead( 7042 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7043 true, false); 7044 7045 // Some targets keep addresses scalar. 7046 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7047 return Cost; 7048 7049 // Some targets support efficient element stores. 7050 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7051 return Cost; 7052 7053 // Collect operands to consider. 7054 CallInst *CI = dyn_cast<CallInst>(I); 7055 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7056 7057 // Skip operands that do not require extraction/scalarization and do not incur 7058 // any overhead. 
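  // For example, constants and loop-invariant operands are available to the
  // scalarized instructions as-is and need no extractelement;
  // filterExtractingOperands drops such operands before the overhead query
  // below.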
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        InstructionCost Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride =
            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = InstructionCost::getInvalid();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
                           : InstructionCost::getInvalid();

      // Choose the better solution for the current VF, write down this
      // decision, and use it during vectorization.
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        assert(!VF.isScalable() &&
               "We cannot yet scalarise for scalable vectors");
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
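      // For example (illustrative), for a factor-2 load group {A[2*i],
      // A[2*i + 1]} the interleave cost above already covers the single wide
      // load and the shuffles that de-interleave its members, so the same
      // decision is recorded for every member while the cost is attached to
      // just one of them.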
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and gets a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
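    // For example, a GEP feeding a consecutive widened load is folded into
    // the wide load's addressing, while a GEP feeding a scalarized access is
    // already charged as address-computation cost in
    // getMemInstScalarizationCost.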
7234 return 0; 7235 case Instruction::Br: { 7236 // In cases of scalarized and predicated instructions, there will be VF 7237 // predicated blocks in the vectorized loop. Each branch around these 7238 // blocks requires also an extract of its vector compare i1 element. 7239 bool ScalarPredicatedBB = false; 7240 BranchInst *BI = cast<BranchInst>(I); 7241 if (VF.isVector() && BI->isConditional() && 7242 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7243 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7244 ScalarPredicatedBB = true; 7245 7246 if (ScalarPredicatedBB) { 7247 // Return cost for branches around scalarized and predicated blocks. 7248 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7249 auto *Vec_i1Ty = 7250 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7251 return (TTI.getScalarizationOverhead( 7252 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7253 false, true) + 7254 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7255 VF.getKnownMinValue())); 7256 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7257 // The back-edge branch will remain, as will all scalar branches. 7258 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7259 else 7260 // This branch will be eliminated by if-conversion. 7261 return 0; 7262 // Note: We currently assume zero cost for an unconditional branch inside 7263 // a predicated block since it will become a fall-through, although we 7264 // may decide in the future to call TTI for all branches. 7265 } 7266 case Instruction::PHI: { 7267 auto *Phi = cast<PHINode>(I); 7268 7269 // First-order recurrences are replaced by vector shuffles inside the loop. 7270 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7271 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7272 return TTI.getShuffleCost( 7273 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7274 VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7275 7276 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7277 // converted into select instructions. We require N - 1 selects per phi 7278 // node, where N is the number of incoming values. 7279 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7280 return (Phi->getNumIncomingValues() - 1) * 7281 TTI.getCmpSelInstrCost( 7282 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7283 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7284 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7285 7286 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7287 } 7288 case Instruction::UDiv: 7289 case Instruction::SDiv: 7290 case Instruction::URem: 7291 case Instruction::SRem: 7292 // If we have a predicated instruction, it may not be executed for each 7293 // vector lane. Get the scalarization cost and scale this amount by the 7294 // probability of executing the predicated block. If the instruction is not 7295 // predicated, we fall through to the next case. 7296 if (VF.isVector() && isScalarWithPredication(I)) { 7297 InstructionCost Cost = 0; 7298 7299 // These instructions have a non-void type, so account for the phi nodes 7300 // that we will create. This cost is likely to be zero. The phi node 7301 // cost, if any, should be scaled by the block probability because it 7302 // models a copy at the end of each predicated block. 7303 Cost += VF.getKnownMinValue() * 7304 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7305 7306 // The cost of the non-predicated instruction. 
7307 Cost += VF.getKnownMinValue() * 7308 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7309 7310 // The cost of insertelement and extractelement instructions needed for 7311 // scalarization. 7312 Cost += getScalarizationOverhead(I, VF); 7313 7314 // Scale the cost by the probability of executing the predicated blocks. 7315 // This assumes the predicated block for each vector lane is equally 7316 // likely. 7317 return Cost / getReciprocalPredBlockProb(); 7318 } 7319 LLVM_FALLTHROUGH; 7320 case Instruction::Add: 7321 case Instruction::FAdd: 7322 case Instruction::Sub: 7323 case Instruction::FSub: 7324 case Instruction::Mul: 7325 case Instruction::FMul: 7326 case Instruction::FDiv: 7327 case Instruction::FRem: 7328 case Instruction::Shl: 7329 case Instruction::LShr: 7330 case Instruction::AShr: 7331 case Instruction::And: 7332 case Instruction::Or: 7333 case Instruction::Xor: { 7334 // Since we will replace the stride by 1 the multiplication should go away. 7335 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7336 return 0; 7337 7338 // Detect reduction patterns 7339 InstructionCost RedCost; 7340 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7341 .isValid()) 7342 return RedCost; 7343 7344 // Certain instructions can be cheaper to vectorize if they have a constant 7345 // second vector operand. One example of this are shifts on x86. 7346 Value *Op2 = I->getOperand(1); 7347 TargetTransformInfo::OperandValueProperties Op2VP; 7348 TargetTransformInfo::OperandValueKind Op2VK = 7349 TTI.getOperandInfo(Op2, Op2VP); 7350 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7351 Op2VK = TargetTransformInfo::OK_UniformValue; 7352 7353 SmallVector<const Value *, 4> Operands(I->operand_values()); 7354 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7355 return N * TTI.getArithmeticInstrCost( 7356 I->getOpcode(), VectorTy, CostKind, 7357 TargetTransformInfo::OK_AnyValue, 7358 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7359 } 7360 case Instruction::FNeg: { 7361 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7362 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF.getKnownMinValue() : 1; 7363 return N * TTI.getArithmeticInstrCost( 7364 I->getOpcode(), VectorTy, CostKind, 7365 TargetTransformInfo::OK_AnyValue, 7366 TargetTransformInfo::OK_AnyValue, 7367 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 7368 I->getOperand(0), I); 7369 } 7370 case Instruction::Select: { 7371 SelectInst *SI = cast<SelectInst>(I); 7372 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7373 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7374 Type *CondTy = SI->getCondition()->getType(); 7375 if (!ScalarCond) 7376 CondTy = VectorType::get(CondTy, VF); 7377 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7378 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7379 } 7380 case Instruction::ICmp: 7381 case Instruction::FCmp: { 7382 Type *ValTy = I->getOperand(0)->getType(); 7383 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7384 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7385 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7386 VectorTy = ToVectorTy(ValTy, VF); 7387 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7388 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7389 } 7390 case Instruction::Store: 7391 case Instruction::Load: { 7392 ElementCount Width = VF; 7393 if (Width.isVector()) { 7394 InstWidening Decision = getWideningDecision(I, Width); 7395 assert(Decision != CM_Unknown && 7396 "CM decision should be taken at this point"); 7397 if (Decision == CM_Scalarize) 7398 Width = ElementCount::getFixed(1); 7399 } 7400 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7401 return getMemoryInstructionCost(I, VF); 7402 } 7403 case Instruction::ZExt: 7404 case Instruction::SExt: 7405 case Instruction::FPToUI: 7406 case Instruction::FPToSI: 7407 case Instruction::FPExt: 7408 case Instruction::PtrToInt: 7409 case Instruction::IntToPtr: 7410 case Instruction::SIToFP: 7411 case Instruction::UIToFP: 7412 case Instruction::Trunc: 7413 case Instruction::FPTrunc: 7414 case Instruction::BitCast: { 7415 // Computes the CastContextHint from a Load/Store instruction. 7416 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7417 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7418 "Expected a load or a store!"); 7419 7420 if (VF.isScalar() || !TheLoop->contains(I)) 7421 return TTI::CastContextHint::Normal; 7422 7423 switch (getWideningDecision(I, VF)) { 7424 case LoopVectorizationCostModel::CM_GatherScatter: 7425 return TTI::CastContextHint::GatherScatter; 7426 case LoopVectorizationCostModel::CM_Interleave: 7427 return TTI::CastContextHint::Interleave; 7428 case LoopVectorizationCostModel::CM_Scalarize: 7429 case LoopVectorizationCostModel::CM_Widen: 7430 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7431 : TTI::CastContextHint::Normal; 7432 case LoopVectorizationCostModel::CM_Widen_Reverse: 7433 return TTI::CastContextHint::Reversed; 7434 case LoopVectorizationCostModel::CM_Unknown: 7435 llvm_unreachable("Instr did not go through cost modelling?"); 7436 } 7437 7438 llvm_unreachable("Unhandled case!"); 7439 }; 7440 7441 unsigned Opcode = I->getOpcode(); 7442 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7443 // For Trunc, the context is the only user, which must be a StoreInst. 
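    // For example (illustrative IR):
    //   %t = trunc i32 %x to i16
    //   store i16 %t, i16* %p
    // takes its cast context from the store's widening decision, e.g.
    // Normal for a consecutive store or Reversed for a reversed one.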
7444 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7445 if (I->hasOneUse()) 7446 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7447 CCH = ComputeCCH(Store); 7448 } 7449 // For Z/Sext, the context is the operand, which must be a LoadInst. 7450 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7451 Opcode == Instruction::FPExt) { 7452 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7453 CCH = ComputeCCH(Load); 7454 } 7455 7456 // We optimize the truncation of induction variables having constant 7457 // integer steps. The cost of these truncations is the same as the scalar 7458 // operation. 7459 if (isOptimizableIVTruncate(I, VF)) { 7460 auto *Trunc = cast<TruncInst>(I); 7461 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7462 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7463 } 7464 7465 // Detect reduction patterns 7466 InstructionCost RedCost; 7467 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7468 .isValid()) 7469 return RedCost; 7470 7471 Type *SrcScalarTy = I->getOperand(0)->getType(); 7472 Type *SrcVecTy = 7473 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7474 if (canTruncateToMinimalBitwidth(I, VF)) { 7475 // This cast is going to be shrunk. This may remove the cast or it might 7476 // turn it into slightly different cast. For example, if MinBW == 16, 7477 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7478 // 7479 // Calculate the modified src and dest types. 7480 Type *MinVecTy = VectorTy; 7481 if (Opcode == Instruction::Trunc) { 7482 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7483 VectorTy = 7484 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7485 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7486 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7487 VectorTy = 7488 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7489 } 7490 } 7491 7492 unsigned N; 7493 if (isScalarAfterVectorization(I, VF)) { 7494 assert(!VF.isScalable() && "VF is assumed to be non scalable"); 7495 N = VF.getKnownMinValue(); 7496 } else 7497 N = 1; 7498 return N * 7499 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7500 } 7501 case Instruction::Call: { 7502 bool NeedToScalarize; 7503 CallInst *CI = cast<CallInst>(I); 7504 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7505 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7506 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7507 return std::min(CallCost, IntrinsicCost); 7508 } 7509 return CallCost; 7510 } 7511 case Instruction::ExtractValue: 7512 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7513 default: 7514 // The cost of executing VF copies of the scalar instruction. This opcode 7515 // is unknown. Assume that it is the same as 'mul'. 7516 return VF.getKnownMinValue() * TTI.getArithmeticInstrCost( 7517 Instruction::Mul, VectorTy, CostKind) + 7518 getScalarizationOverhead(I, VF); 7519 } // end of switch. 
7520 } 7521 7522 char LoopVectorize::ID = 0; 7523 7524 static const char lv_name[] = "Loop Vectorization"; 7525 7526 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7527 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7528 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7529 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7530 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7531 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7532 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7533 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7534 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7535 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7536 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7537 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7538 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7539 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7540 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7541 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7542 7543 namespace llvm { 7544 7545 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7546 7547 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7548 bool VectorizeOnlyWhenForced) { 7549 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7550 } 7551 7552 } // end namespace llvm 7553 7554 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7555 // Check if the pointer operand of a load or store instruction is 7556 // consecutive. 7557 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7558 return Legal->isConsecutivePtr(Ptr); 7559 return false; 7560 } 7561 7562 void LoopVectorizationCostModel::collectValuesToIgnore() { 7563 // Ignore ephemeral values. 7564 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7565 7566 // Ignore type-promoting instructions we identified during reduction 7567 // detection. 7568 for (auto &Reduction : Legal->getReductionVars()) { 7569 RecurrenceDescriptor &RedDes = Reduction.second; 7570 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7571 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7572 } 7573 // Ignore type-casting instructions we identified during induction 7574 // detection. 7575 for (auto &Induction : Legal->getInductionVars()) { 7576 InductionDescriptor &IndDes = Induction.second; 7577 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7578 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7579 } 7580 } 7581 7582 void LoopVectorizationCostModel::collectInLoopReductions() { 7583 for (auto &Reduction : Legal->getReductionVars()) { 7584 PHINode *Phi = Reduction.first; 7585 RecurrenceDescriptor &RdxDesc = Reduction.second; 7586 7587 // We don't collect reductions that are type promoted (yet). 7588 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7589 continue; 7590 7591 // If the target would prefer this reduction to happen "in-loop", then we 7592 // want to record it as such. 7593 unsigned Opcode = RdxDesc.getOpcode(); 7594 if (!PreferInLoopReductions && 7595 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7596 TargetTransformInfo::ReductionFlags())) 7597 continue; 7598 7599 // Check that we can correctly put the reductions into the loop, by 7600 // finding the chain of operations that leads from the phi to the loop 7601 // exit value. 
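    // For example (illustrative IR), for a simple integer reduction
    //   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %loop ]
    //   %sum.next = add i32 %sum, %val
    // the chain is {%sum.next}. If no such chain can be found, the list
    // below stays empty and the reduction remains out-of-loop.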
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(
          determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
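  // When the tail is folded by masking, every block in the loop is
  // predicated, so interleave groups would have to be emitted as *masked*
  // interleaved accesses; without target support for those, the groups must
  // be broken up, as done below.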
  if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all
      // decisions based on them, which includes widening decisions and uniform
      // and scalar values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF.isNonZero() && "MaxVF is zero.");

  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
  if (!UserVF.isZero() &&
      (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
    ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
    LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
                      << " VF " << VF << ".\n");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(VF);
    CM.collectInLoopReductions();
    buildVPlansWithVPRecipes(VF, VF);
    LLVM_DEBUG(printPlans(dbgs()));
    return {{VF, 0}};
  }

  assert(!MaxVF.isScalable() &&
         "Scalable vectors not yet supported beyond this point");

  for (ElementCount VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();

  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
  LLVM_DEBUG(printPlans(dbgs()));
  if (MaxVF.isScalar())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}

void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) {
    return !Plan->hasVF(VF);
  });
  assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPCallbackILV CallbackILV(ILV);

  assert(BestVF.hasValue() && "Vectorization Factor is missing");

  VPTransformState State{*BestVF,
                         BestUF,
                         LI,
                         DT,
                         ILV.Builder,
                         ILV.VectorLoopValueMap,
                         &ILV,
                         CallbackILV};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.TripCount = ILV.getOrCreateTripCount(nullptr);
  State.CanonicalIV = ILV.Induction;

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  // predication, updating analyses.
  ILV.fixVectorizedLoop();

  ILV.printDebugTracesAtEnd();
}

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // An operand of the icmp is often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);

    // We record as "Dead" also the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime
    // test guarding the vectorized loop, the value of the phi, and the casted
    // value of the phi, are the same. The last instruction in this casting
    // chain will get its scalar/vector/widened def from the scalar/vector/widened def
Any other casts in the induction def-use chain 7847 // have no other uses outside the phi update chain, and will be ignored. 7848 InductionDescriptor &IndDes = Induction.second; 7849 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7850 DeadInstructions.insert(Casts.begin(), Casts.end()); 7851 } 7852 } 7853 7854 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7855 7856 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7857 7858 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7859 Instruction::BinaryOps BinOp) { 7860 // When unrolling and the VF is 1, we only need to add a simple scalar. 7861 Type *Ty = Val->getType(); 7862 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7863 7864 if (Ty->isFloatingPointTy()) { 7865 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7866 7867 // Floating point operations had to be 'fast' to enable the unrolling. 7868 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7869 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7870 } 7871 Constant *C = ConstantInt::get(Ty, StartIdx); 7872 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7873 } 7874 7875 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7876 SmallVector<Metadata *, 4> MDs; 7877 // Reserve first location for self reference to the LoopID metadata node. 7878 MDs.push_back(nullptr); 7879 bool IsUnrollMetadata = false; 7880 MDNode *LoopID = L->getLoopID(); 7881 if (LoopID) { 7882 // First find existing loop unrolling disable metadata. 7883 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7884 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7885 if (MD) { 7886 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7887 IsUnrollMetadata = 7888 S && S->getString().startswith("llvm.loop.unroll.disable"); 7889 } 7890 MDs.push_back(LoopID->getOperand(i)); 7891 } 7892 } 7893 7894 if (!IsUnrollMetadata) { 7895 // Add runtime unroll disable metadata. 7896 LLVMContext &Context = L->getHeader()->getContext(); 7897 SmallVector<Metadata *, 1> DisableOperands; 7898 DisableOperands.push_back( 7899 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7900 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7901 MDs.push_back(DisableNode); 7902 MDNode *NewLoopID = MDNode::get(Context, MDs); 7903 // Set operand 0 to refer to the loop id itself. 7904 NewLoopID->replaceOperandWith(0, NewLoopID); 7905 L->setLoopID(NewLoopID); 7906 } 7907 } 7908 7909 //===--------------------------------------------------------------------===// 7910 // EpilogueVectorizerMainLoop 7911 //===--------------------------------------------------------------------===// 7912 7913 /// This function is partially responsible for generating the control flow 7914 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7915 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7916 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7917 Loop *Lp = createVectorLoopSkeleton(""); 7918 7919 // Generate the code to check the minimum iteration count of the vector 7920 // epilogue (see below). 7921 EPI.EpilogueIterationCountCheck = 7922 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 7923 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7924 7925 // Generate the code to check any assumptions that we've made for SCEV 7926 // expressions. 
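  // For example (illustrative), if SCEV could only analyze an address
  // computation under the predicate that an induction does not overflow,
  // the check emitted here branches to the scalar loop whenever that
  // assumption fails at runtime.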
  BasicBlock *SavedPreHeader = LoopVectorPreHeader;
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // If a safety check was generated, save it.
  if (SavedPreHeader != LoopVectorPreHeader)
    EPI.SCEVSafetyCheck = SavedPreHeader;

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  SavedPreHeader = LoopVectorPreHeader;
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // If a safety check was generated, save/overwrite it.
  if (SavedPreHeader != LoopVectorPreHeader)
    EPI.MemSafetyCheck = SavedPreHeader;

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length
  // for the main loop is compensated for by the gain from vectorizing the
  // larger trip count. Note: the branch will get updated later on when we
  // vectorize the epilogue.
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Skip creating induction resume values here; they will be created in the
  // second pass. If we created them here, they wouldn't be used anyway,
  // because the vplan in the second pass still contains the inductions from
  // the original loop.

  return completeLoopSkeleton(Lp, OrigLoopID);
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  unsigned VFactor =
      ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the main vector loop.
  auto P =
      Cost->requiresScalarEpilogue() ?
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8005 8006 Value *CheckMinIters = Builder.CreateICmp( 8007 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 8008 "min.iters.check"); 8009 8010 if (!ForEpilogue) 8011 TCCheckBlock->setName("vector.main.loop.iter.check"); 8012 8013 // Create new preheader for vector loop. 8014 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8015 DT, LI, nullptr, "vector.ph"); 8016 8017 if (ForEpilogue) { 8018 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8019 DT->getNode(Bypass)->getIDom()) && 8020 "TC check is expected to dominate Bypass"); 8021 8022 // Update dominator for Bypass & LoopExit. 8023 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8024 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8025 8026 LoopBypassBlocks.push_back(TCCheckBlock); 8027 8028 // Save the trip count so we don't have to regenerate it in the 8029 // vec.epilog.iter.check. This is safe to do because the trip count 8030 // generated here dominates the vector epilog iter check. 8031 EPI.TripCount = Count; 8032 } 8033 8034 ReplaceInstWithInst( 8035 TCCheckBlock->getTerminator(), 8036 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8037 8038 return TCCheckBlock; 8039 } 8040 8041 //===--------------------------------------------------------------------===// 8042 // EpilogueVectorizerEpilogueLoop 8043 //===--------------------------------------------------------------------===// 8044 8045 /// This function is partially responsible for generating the control flow 8046 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8047 BasicBlock * 8048 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8049 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8050 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8051 8052 // Now, compare the remaining count and if there aren't enough iterations to 8053 // execute the vectorized epilogue skip to the scalar part. 8054 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8055 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8056 LoopVectorPreHeader = 8057 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8058 LI, nullptr, "vec.epilog.ph"); 8059 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8060 VecEpilogueIterationCountCheck); 8061 8062 // Adjust the control flow taking the state info from the main loop 8063 // vectorization into account. 
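  // After this adjustment, the checks created over the two passes are wired
  // up roughly as follows (simplified sketch; block names as set in both
  // passes):
  //   iter.check:                  too few iterations even for the epilogue
  //                                VF? Branch to the scalar preheader.
  //   vector.main.loop.iter.check: too few iterations for the main VF * UF?
  //                                Branch directly to vec.epilog.ph.
  //   vec.epilog.iter.check:       after the main vector loop, not enough
  //                                iterations remaining? Scalar preheader.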
8064 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8065 "expected this to be saved from the previous pass."); 8066 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8067 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8068 8069 DT->changeImmediateDominator(LoopVectorPreHeader, 8070 EPI.MainLoopIterationCountCheck); 8071 8072 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8073 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8074 8075 if (EPI.SCEVSafetyCheck) 8076 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8077 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8078 if (EPI.MemSafetyCheck) 8079 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8080 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8081 8082 DT->changeImmediateDominator( 8083 VecEpilogueIterationCountCheck, 8084 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8085 8086 DT->changeImmediateDominator(LoopScalarPreHeader, 8087 EPI.EpilogueIterationCountCheck); 8088 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 8089 8090 // Keep track of bypass blocks, as they feed start values to the induction 8091 // phis in the scalar loop preheader. 8092 if (EPI.SCEVSafetyCheck) 8093 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8094 if (EPI.MemSafetyCheck) 8095 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8096 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8097 8098 // Generate a resume induction for the vector epilogue and put it in the 8099 // vector epilogue preheader 8100 Type *IdxTy = Legal->getWidestInductionType(); 8101 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8102 LoopVectorPreHeader->getFirstNonPHI()); 8103 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8104 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8105 EPI.MainLoopIterationCountCheck); 8106 8107 // Generate the induction variable. 8108 OldInduction = Legal->getPrimaryInduction(); 8109 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8110 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8111 Value *StartIdx = EPResumeVal; 8112 Induction = 8113 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8114 getDebugLocFromInstOrOperands(OldInduction)); 8115 8116 // Generate induction resume values. These variables save the new starting 8117 // indexes for the scalar loop. They are used to test if there are any tail 8118 // iterations left once the vector loop has completed. 8119 // Note that when the vectorized epilogue is skipped due to iteration count 8120 // check, then the resume value for the induction variable comes from 8121 // the trip count of the main vector loop, hence passing the AdditionalBypass 8122 // argument. 
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P =
      Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count,
      ConstantInt::get(Count->getType(),
                       EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
      "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
8215 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8216 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8217 if (ECEntryIt != EdgeMaskCache.end()) 8218 return ECEntryIt->second; 8219 8220 VPValue *SrcMask = createBlockInMask(Src, Plan); 8221 8222 // The terminator has to be a branch inst! 8223 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8224 assert(BI && "Unexpected terminator found"); 8225 8226 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8227 return EdgeMaskCache[Edge] = SrcMask; 8228 8229 // If source is an exiting block, we know the exit edge is dynamically dead 8230 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8231 // adding uses of an otherwise potentially dead instruction. 8232 if (OrigLoop->isLoopExiting(Src)) 8233 return EdgeMaskCache[Edge] = SrcMask; 8234 8235 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8236 assert(EdgeMask && "No Edge Mask found for condition"); 8237 8238 if (BI->getSuccessor(0) != Dst) 8239 EdgeMask = Builder.createNot(EdgeMask); 8240 8241 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 8242 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 8243 8244 return EdgeMaskCache[Edge] = EdgeMask; 8245 } 8246 8247 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8248 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8249 8250 // Look for cached value. 8251 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8252 if (BCEntryIt != BlockMaskCache.end()) 8253 return BCEntryIt->second; 8254 8255 // All-one mask is modelled as no-mask following the convention for masked 8256 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8257 VPValue *BlockMask = nullptr; 8258 8259 if (OrigLoop->getHeader() == BB) { 8260 if (!CM.blockNeedsPredication(BB)) 8261 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8262 8263 // Create the block in mask as the first non-phi instruction in the block. 8264 VPBuilder::InsertPointGuard Guard(Builder); 8265 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8266 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8267 8268 // Introduce the early-exit compare IV <= BTC to form header block mask. 8269 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8270 // Start by constructing the desired canonical IV. 8271 VPValue *IV = nullptr; 8272 if (Legal->getPrimaryInduction()) 8273 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8274 else { 8275 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8276 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8277 IV = IVRecipe->getVPValue(); 8278 } 8279 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8280 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8281 8282 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8283 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8284 // as a second argument, we only pass the IV here and extract the 8285 // tripcount from the transform state where codegen of the VP instructions 8286 // happen. 8287 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8288 } else { 8289 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8290 } 8291 return BlockMaskCache[BB] = BlockMask; 8292 } 8293 8294 // This is the block mask. We OR all incoming edges. 
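  // Illustrative example for a hypothetical if-then-else inside the loop:
  //   EdgeMask(cond.bb -> then) = BlockMask(cond.bb) AND cond
  //   EdgeMask(cond.bb -> else) = BlockMask(cond.bb) AND (NOT cond)
  //   BlockMask(merge)          = EdgeMask(then -> merge) OR
  //                               EdgeMask(else -> merge)
  // A nullptr mask represents all-ones, so a single all-ones incoming edge
  // folds the whole block mask to all-ones; that is the early return below.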
8295 for (auto *Predecessor : predecessors(BB)) { 8296 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8297 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8298 return BlockMaskCache[BB] = EdgeMask; 8299 8300 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8301 BlockMask = EdgeMask; 8302 continue; 8303 } 8304 8305 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8306 } 8307 8308 return BlockMaskCache[BB] = BlockMask; 8309 } 8310 8311 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 8312 VPlanPtr &Plan) { 8313 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8314 "Must be called with either a load or store"); 8315 8316 auto willWiden = [&](ElementCount VF) -> bool { 8317 if (VF.isScalar()) 8318 return false; 8319 LoopVectorizationCostModel::InstWidening Decision = 8320 CM.getWideningDecision(I, VF); 8321 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8322 "CM decision should be taken at this point."); 8323 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8324 return true; 8325 if (CM.isScalarAfterVectorization(I, VF) || 8326 CM.isProfitableToScalarize(I, VF)) 8327 return false; 8328 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8329 }; 8330 8331 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8332 return nullptr; 8333 8334 VPValue *Mask = nullptr; 8335 if (Legal->isMaskRequired(I)) 8336 Mask = createBlockInMask(I->getParent(), Plan); 8337 8338 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 8339 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8340 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 8341 8342 StoreInst *Store = cast<StoreInst>(I); 8343 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 8344 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 8345 } 8346 8347 VPWidenIntOrFpInductionRecipe * 8348 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const { 8349 // Check if this is an integer or fp induction. If so, build the recipe that 8350 // produces its scalar and vector values. 8351 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8352 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8353 II.getKind() == InductionDescriptor::IK_FpInduction) { 8354 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8355 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8356 return new VPWidenIntOrFpInductionRecipe( 8357 Phi, Start, Casts.empty() ? nullptr : Casts.front()); 8358 } 8359 8360 return nullptr; 8361 } 8362 8363 VPWidenIntOrFpInductionRecipe * 8364 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range, 8365 VPlan &Plan) const { 8366 // Optimize the special case where the source is a constant integer 8367 // induction variable. Notice that we can only optimize the 'trunc' case 8368 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8369 // (c) other casts depend on pointer size. 8370 8371 // Determine whether \p K is a truncation based on an induction variable that 8372 // can be optimized. 
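  // Illustrative example (hypothetical IR): given a primary induction
  //   %iv = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
  //   %t  = trunc i64 %iv to i32
  // the trunc itself can be widened as a <VF x i32> induction, instead of
  // widening the i64 induction and truncating each of its elements.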
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(ElementCount)> {
    return [=](ElementCount VF) -> bool {
      return CM.isOptimizableIVTruncate(K, VF);
    };
  };

  if (LoopVectorizationPlanner::getDecisionAndClampRange(
          isOptimizableIVTruncate(I), Range)) {

    InductionDescriptor II =
        Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
    VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             Start, nullptr, I);
  }
  return nullptr;
}

VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  SmallVector<VPValue *, 2> Operands;
  unsigned NumIncoming = Phi->getNumIncomingValues();
  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
    if (EdgeMask)
      Operands.push_back(EdgeMask);
  }
  return new VPBlendRecipe(Phi, Operands);
}

VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
                                                   VPlan &Plan) const {

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [this, CI](ElementCount VF) {
        return CM.isScalarWithPredication(CI, VF);
      },
      Range);

  if (IsPredicated)
    return nullptr;

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
             ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
             ID == Intrinsic::pseudoprobe ||
             ID == Intrinsic::experimental_noalias_scope_decl))
    return nullptr;

  auto willWiden = [&](ElementCount VF) -> bool {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether the vectorized form should be an intrinsic
    // or a regular call, i.e. whether a vector intrinsic is cheaper than a
    // vectorized library call.
    bool NeedToScalarize = false;
    InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
    InstructionCost IntrinsicCost = ID ?
CM.getVectorIntrinsicCost(CI, VF) : 0; 8441 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8442 assert(IntrinsicCost.isValid() && CallCost.isValid() && 8443 "Cannot have invalid costs while widening"); 8444 return UseVectorIntrinsic || !NeedToScalarize; 8445 }; 8446 8447 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8448 return nullptr; 8449 8450 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 8451 } 8452 8453 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8454 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8455 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8456 // Instruction should be widened, unless it is scalar after vectorization, 8457 // scalarization is profitable or it is predicated. 8458 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8459 return CM.isScalarAfterVectorization(I, VF) || 8460 CM.isProfitableToScalarize(I, VF) || 8461 CM.isScalarWithPredication(I, VF); 8462 }; 8463 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8464 Range); 8465 } 8466 8467 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 8468 auto IsVectorizableOpcode = [](unsigned Opcode) { 8469 switch (Opcode) { 8470 case Instruction::Add: 8471 case Instruction::And: 8472 case Instruction::AShr: 8473 case Instruction::BitCast: 8474 case Instruction::FAdd: 8475 case Instruction::FCmp: 8476 case Instruction::FDiv: 8477 case Instruction::FMul: 8478 case Instruction::FNeg: 8479 case Instruction::FPExt: 8480 case Instruction::FPToSI: 8481 case Instruction::FPToUI: 8482 case Instruction::FPTrunc: 8483 case Instruction::FRem: 8484 case Instruction::FSub: 8485 case Instruction::ICmp: 8486 case Instruction::IntToPtr: 8487 case Instruction::LShr: 8488 case Instruction::Mul: 8489 case Instruction::Or: 8490 case Instruction::PtrToInt: 8491 case Instruction::SDiv: 8492 case Instruction::Select: 8493 case Instruction::SExt: 8494 case Instruction::Shl: 8495 case Instruction::SIToFP: 8496 case Instruction::SRem: 8497 case Instruction::Sub: 8498 case Instruction::Trunc: 8499 case Instruction::UDiv: 8500 case Instruction::UIToFP: 8501 case Instruction::URem: 8502 case Instruction::Xor: 8503 case Instruction::ZExt: 8504 return true; 8505 } 8506 return false; 8507 }; 8508 8509 if (!IsVectorizableOpcode(I->getOpcode())) 8510 return nullptr; 8511 8512 // Success: widen this instruction. 8513 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 8514 } 8515 8516 VPBasicBlock *VPRecipeBuilder::handleReplication( 8517 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8518 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8519 VPlanPtr &Plan) { 8520 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8521 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8522 Range); 8523 8524 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8525 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8526 Range); 8527 8528 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8529 IsUniform, IsPredicated); 8530 setRecipe(I, Recipe); 8531 Plan->addVPValue(I, Recipe); 8532 8533 // Find if I uses a predicated instruction. If so, it will use its scalar 8534 // value. Avoid hoisting the insert-element which packs the scalar value into 8535 // a vector value, as that happens iff all users use the vector value. 
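  // For illustration: if a predicated sdiv is used by another replicated
  // (scalar) instruction, that user consumes the sdiv's scalar value, so
  // packing the sdiv's result into a vector cannot be hoisted to its
  // definition; the loop below clears AlsoPack on such operand recipes.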
8536 for (auto &Op : I->operands()) 8537 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8538 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8539 PredInst2Recipe[PredInst]->setAlsoPack(false); 8540 8541 // Finalize the recipe for Instr, first if it is not predicated. 8542 if (!IsPredicated) { 8543 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8544 VPBB->appendRecipe(Recipe); 8545 return VPBB; 8546 } 8547 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8548 assert(VPBB->getSuccessors().empty() && 8549 "VPBB has successors when handling predicated replication."); 8550 // Record predicated instructions for above packing optimizations. 8551 PredInst2Recipe[I] = Recipe; 8552 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8553 VPBlockUtils::insertBlockAfter(Region, VPBB); 8554 auto *RegSucc = new VPBasicBlock(); 8555 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8556 return RegSucc; 8557 } 8558 8559 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8560 VPRecipeBase *PredRecipe, 8561 VPlanPtr &Plan) { 8562 // Instructions marked for predication are replicated and placed under an 8563 // if-then construct to prevent side-effects. 8564 8565 // Generate recipes to compute the block mask for this region. 8566 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8567 8568 // Build the triangular if-then region. 8569 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8570 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8571 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8572 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8573 auto *PHIRecipe = Instr->getType()->isVoidTy() 8574 ? nullptr 8575 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8576 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8577 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8578 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8579 8580 // Note: first set Entry as region entry and then connect successors starting 8581 // from it in order, to propagate the "parent" of each VPBasicBlock. 8582 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8583 VPBlockUtils::connectBlocks(Pred, Exit); 8584 8585 return Region; 8586 } 8587 8588 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8589 VFRange &Range, 8590 VPlanPtr &Plan) { 8591 // First, check for specific widening recipes that deal with calls, memory 8592 // operations, inductions and Phi nodes. 
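  // In short, the dispatch order below is: calls, then loads/stores, then
  // phis (blends, inductions, reductions), then truncates of inductions;
  // whatever remains is either widened (GEP, select, or a generic opcode via
  // tryToWiden) or returns nullptr so that handleReplication scalarizes it.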
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return tryToWidenCall(CI, Range, *Plan);

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return tryToWidenMemory(Instr, Range, Plan);

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
      return Recipe;

    if (Legal->isReductionVariable(Phi)) {
      RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
      VPValue *StartV =
          Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
      return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
    }

    return new VPWidenPHIRecipe(Phi);
  }

  if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
                                    cast<TruncInst>(Instr), Range, *Plan)))
    return Recipe;

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
                                OrigLoop);

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
                                   InvariantCond);
  }

  return tryToWiden(Instr, *Plan);
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
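  // For illustration, SinkAfter typically holds first-order-recurrence
  // constraints: given
  //   %r = phi i32 [ %init, %ph ], [ %r.next, %latch ]
  // a user of %r may have to be sunk after the definition of %r.next;
  // Entry.first is the instruction to sink and Entry.second the instruction
  // to sink it after (applied once the initial VPlan is built, see below).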
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const DenseMap<Instruction *, Instruction *> &SinkAfter) {

  // Hold a mapping from predicated instructions to their recipes, in order to
  // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of vector value.
  DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and
  // add placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
  auto Plan = std::make_unique<VPlan>();
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  Plan->setEntry(VPBB);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      if (auto Recipe =
              RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, Instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
          Instr, Range, VPBB, PredInst2Recipe, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one, VPBB, reflecting original
  // basic-blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
    // If the target is in a replication region, make sure to move Sink to the
    // block after it, not into the replication region itself.
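    // (For illustration: a replication region is the single-entry single-exit
    // pred.*.entry / pred.*.if / pred.*.continue construct built by
    // createReplicateRegion above; moving Sink into it would predicate Sink
    // as well, which is not intended.)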
8806 if (auto *Region = 8807 dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) { 8808 if (Region->isReplicator()) { 8809 assert(Region->getNumSuccessors() == 1 && "Expected SESE region!"); 8810 VPBasicBlock *NextBlock = 8811 cast<VPBasicBlock>(Region->getSuccessors().front()); 8812 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8813 continue; 8814 } 8815 } 8816 Sink->moveAfter(Target); 8817 } 8818 8819 // Interleave memory: for each Interleave Group we marked earlier as relevant 8820 // for this VPlan, replace the Recipes widening its memory instructions with a 8821 // single VPInterleaveRecipe at its insertion point. 8822 for (auto IG : InterleaveGroups) { 8823 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8824 RecipeBuilder.getRecipe(IG->getInsertPos())); 8825 SmallVector<VPValue *, 4> StoredValues; 8826 for (unsigned i = 0; i < IG->getFactor(); ++i) 8827 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 8828 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 8829 8830 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8831 Recipe->getMask()); 8832 VPIG->insertBefore(Recipe); 8833 unsigned J = 0; 8834 for (unsigned i = 0; i < IG->getFactor(); ++i) 8835 if (Instruction *Member = IG->getMember(i)) { 8836 if (!Member->getType()->isVoidTy()) { 8837 VPValue *OriginalV = Plan->getVPValue(Member); 8838 Plan->removeVPValueFor(Member); 8839 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8840 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8841 J++; 8842 } 8843 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 8844 } 8845 } 8846 8847 // Adjust the recipes for any inloop reductions. 8848 if (Range.Start.isVector()) 8849 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 8850 8851 // Finally, if tail is folded by masking, introduce selects between the phi 8852 // and the live-out instruction of each reduction, at the end of the latch. 8853 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 8854 Builder.setInsertPoint(VPBB); 8855 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 8856 for (auto &Reduction : Legal->getReductionVars()) { 8857 if (CM.isInLoopReduction(Reduction.first)) 8858 continue; 8859 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 8860 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 8861 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 8862 } 8863 } 8864 8865 std::string PlanName; 8866 raw_string_ostream RSO(PlanName); 8867 ElementCount VF = Range.Start; 8868 Plan->addVF(VF); 8869 RSO << "Initial VPlan for VF={" << VF; 8870 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 8871 Plan->addVF(VF); 8872 RSO << "," << VF; 8873 } 8874 RSO << "},UF>=1"; 8875 RSO.flush(); 8876 Plan->setName(PlanName); 8877 8878 return Plan; 8879 } 8880 8881 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8882 // Outer loop handling: They may require CFG and instruction level 8883 // transformations before even evaluating whether vectorization is profitable. 8884 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8885 // the vectorization pipeline. 
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
  return Plan;
}

// Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
    VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert(isa<VPWidenRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;
      VPReductionRecipe *RedRecipe =
          new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      // Replace all uses of the widened recipe's result with the reduction
      // recipe, then erase the now-dead widened recipe.
      WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
      WidenRecipe->eraseFromParent();

      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        VPRecipeBase *CompareRecipe =
            RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
        assert(isa<VPWidenRecipe>(CompareRecipe) &&
               "Expected to replace a VPWidenSC");
        assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
               "Expected no remaining users");
        CompareRecipe->eraseFromParent();
      }
      Chain = R;
    }
  }
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
    Value *V, const VPIteration &Instance) {
  return ILV.getOrCreateScalarValue(V, Instance);
}

void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i;
}

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
                                  *this, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
                                    this, *this, InvariantCond, State);
}

void VPWidenRecipe::execute(VPTransformState &State) {
  State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
}

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
                      *this, State.UF, State.VF, IsPtrLoopInvariant,
                      IsIndexLoopInvariant, State);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
                                   getTruncInst(), getVPValue(0),
                                   getCastValue(), State);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  Value *StartV =
      getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr;
  State.ILV->widenPHIInstruction(Phi, RdxDesc, StartV, State.UF, State.VF);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(State.Builder, Phi);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
9040 // At this point we generate the predication tree. There may be 9041 // duplications since this is a simple recursive scan, but future 9042 // optimizations will clean it up. 9043 9044 unsigned NumIncoming = getNumIncomingValues(); 9045 9046 // Generate a sequence of selects of the form: 9047 // SELECT(Mask3, In3, 9048 // SELECT(Mask2, In2, 9049 // SELECT(Mask1, In1, 9050 // In0))) 9051 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9052 // are essentially undef are taken from In0. 9053 InnerLoopVectorizer::VectorParts Entry(State.UF); 9054 for (unsigned In = 0; In < NumIncoming; ++In) { 9055 for (unsigned Part = 0; Part < State.UF; ++Part) { 9056 // We might have single edge PHIs (blocks) - use an identity 9057 // 'select' for the first PHI operand. 9058 Value *In0 = State.get(getIncomingValue(In), Part); 9059 if (In == 0) 9060 Entry[Part] = In0; // Initialize with the first incoming value. 9061 else { 9062 // Select between the current value and the previous incoming edge 9063 // based on the incoming mask. 9064 Value *Cond = State.get(getMask(In), Part); 9065 Entry[Part] = 9066 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9067 } 9068 } 9069 } 9070 for (unsigned Part = 0; Part < State.UF; ++Part) 9071 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 9072 } 9073 9074 void VPInterleaveRecipe::execute(VPTransformState &State) { 9075 assert(!State.Instance && "Interleave group being replicated."); 9076 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9077 getStoredValues(), getMask()); 9078 } 9079 9080 void VPReductionRecipe::execute(VPTransformState &State) { 9081 assert(!State.Instance && "Reduction being replicated."); 9082 for (unsigned Part = 0; Part < State.UF; ++Part) { 9083 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9084 Value *NewVecOp = State.get(getVecOp(), Part); 9085 if (VPValue *Cond = getCondOp()) { 9086 Value *NewCond = State.get(Cond, Part); 9087 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9088 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 9089 Kind, VecTy->getElementType()); 9090 Constant *IdenVec = 9091 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 9092 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9093 NewVecOp = Select; 9094 } 9095 Value *NewRed = 9096 createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9097 Value *PrevInChain = State.get(getChainOp(), Part); 9098 Value *NextInChain; 9099 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9100 NextInChain = 9101 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9102 NewRed, PrevInChain); 9103 } else { 9104 NextInChain = State.Builder.CreateBinOp( 9105 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 9106 PrevInChain); 9107 } 9108 State.set(this, getUnderlyingInstr(), NextInChain, Part); 9109 } 9110 } 9111 9112 void VPReplicateRecipe::execute(VPTransformState &State) { 9113 if (State.Instance) { // Generate a single instance. 9114 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9115 State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this, 9116 *State.Instance, IsPredicated, State); 9117 // Insert scalar instance packing it into a vector. 9118 if (AlsoPack && State.VF.isVector()) { 9119 // If we're constructing lane 0, initialize to start from poison. 
      if (State.Instance->Lane == 0) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.ValueMap.setVectorValue(getUnderlyingInstr(),
                                      State.Instance->Part, Poison);
      }
      State.ILV->packScalarIntoVectorValue(getUnderlyingInstr(),
                                           *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
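  // Sketch of the scalar case (hypothetical IR, names invented):
  //   pred.sdiv.if:       %d   = sdiv i32 %a, %b
  //   pred.sdiv.continue: %phi = phi i32 [ poison, %predicating.bb ],
  //                                      [ %d, %pred.sdiv.if ]
  // i.e. the phi yields poison for masked-off lanes and the computed value
  // for the lane produced in the predicated block.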
9186 unsigned Part = State.Instance->Part; 9187 Instruction *PredInst = 9188 cast<Instruction>(getOperand(0)->getUnderlyingValue()); 9189 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 9190 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 9191 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9192 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9193 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9194 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9195 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 9196 } else { 9197 Type *PredInstType = PredInst->getType(); 9198 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9199 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), PredicatingBB); 9200 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9201 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 9202 } 9203 } 9204 9205 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9206 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9207 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 9208 StoredValue ? nullptr : getVPValue(), 9209 getAddr(), StoredValue, getMask()); 9210 } 9211 9212 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9213 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9214 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9215 // for predication. 9216 static ScalarEpilogueLowering getScalarEpilogueLowering( 9217 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9218 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9219 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9220 LoopVectorizationLegality &LVL) { 9221 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9222 // don't look at hints or options, and don't request a scalar epilogue. 9223 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9224 // LoopAccessInfo (due to code dependency and not being able to reliably get 9225 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9226 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9227 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9228 // back to the old way and vectorize with versioning when forced. See D81345.) 
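  // For instance, summarizing the cascade below: under -Os the scalar
  // epilogue is always disallowed; under profile-guided size optimization it
  // is disallowed unless vectorization was explicitly forced; otherwise the
  // command-line option, then the loop hint, and finally the TTI hook decide
  // whether predication is preferred over an epilogue.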
9229 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9230 PGSOQueryType::IRPass) && 9231 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9232 return CM_ScalarEpilogueNotAllowedOptSize; 9233 9234 // 2) If set, obey the directives 9235 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9236 switch (PreferPredicateOverEpilogue) { 9237 case PreferPredicateTy::ScalarEpilogue: 9238 return CM_ScalarEpilogueAllowed; 9239 case PreferPredicateTy::PredicateElseScalarEpilogue: 9240 return CM_ScalarEpilogueNotNeededUsePredicate; 9241 case PreferPredicateTy::PredicateOrDontVectorize: 9242 return CM_ScalarEpilogueNotAllowedUsePredicate; 9243 }; 9244 } 9245 9246 // 3) If set, obey the hints 9247 switch (Hints.getPredicate()) { 9248 case LoopVectorizeHints::FK_Enabled: 9249 return CM_ScalarEpilogueNotNeededUsePredicate; 9250 case LoopVectorizeHints::FK_Disabled: 9251 return CM_ScalarEpilogueAllowed; 9252 }; 9253 9254 // 4) if the TTI hook indicates this is profitable, request predication. 9255 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9256 LVL.getLAI())) 9257 return CM_ScalarEpilogueNotNeededUsePredicate; 9258 9259 return CM_ScalarEpilogueAllowed; 9260 } 9261 9262 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V, 9263 const VPIteration &Instance) { 9264 set(Def, V, Instance); 9265 ILV->setScalarValue(IRDef, Instance, V); 9266 } 9267 9268 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V, 9269 unsigned Part) { 9270 set(Def, V, Part); 9271 ILV->setVectorValue(IRDef, Part, V); 9272 } 9273 9274 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9275 // If Values have been set for this Def return the one relevant for \p Part. 9276 if (hasVectorValue(Def, Part)) 9277 return Data.PerPartOutput[Def][Part]; 9278 9279 // TODO: Remove the callback once all scalar recipes are managed using 9280 // VPValues. 9281 if (!hasScalarValue(Def, {Part, 0})) 9282 return Callback.getOrCreateVectorValues(VPValue2Value[Def], Part); 9283 9284 Value *ScalarValue = get(Def, {Part, 0}); 9285 // If we aren't vectorizing, we can just copy the scalar map values over 9286 // to the vector map. 9287 if (VF.isScalar()) { 9288 set(Def, ScalarValue, Part); 9289 return ScalarValue; 9290 } 9291 9292 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9293 bool IsUniform = RepR && RepR->isUniform(); 9294 9295 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9296 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 9297 9298 // Set the insert point after the last scalarized instruction. This 9299 // ensures the insertelement sequence will directly follow the scalar 9300 // definitions. 9301 auto OldIP = Builder.saveIP(); 9302 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 9303 Builder.SetInsertPoint(&*NewIP); 9304 9305 // However, if we are vectorizing, we need to construct the vector values. 9306 // If the value is known to be uniform after vectorization, we can just 9307 // broadcast the scalar value corresponding to lane zero for each unroll 9308 // iteration. Otherwise, we construct the vector values using 9309 // insertelement instructions. Since the resulting vectors are stored in 9310 // VectorLoopValueMap, we will only generate the insertelements once. 9311 Value *VectorValue = nullptr; 9312 if (IsUniform) { 9313 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 9314 set(Def, VectorValue, Part); 9315 } else { 9316 // Initialize packing with insertelements to start from undef. 
9317 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 9318 Value *Undef = UndefValue::get(VectorType::get(LastInst->getType(), VF)); 9319 set(Def, Undef, Part); 9320 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 9321 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 9322 VectorValue = get(Def, Part); 9323 } 9324 Builder.restoreIP(OldIP); 9325 return VectorValue; 9326 } 9327 9328 // Process the loop in the VPlan-native vectorization path. This path builds 9329 // VPlan upfront in the vectorization pipeline, which allows to apply 9330 // VPlan-to-VPlan transformations from the very beginning without modifying the 9331 // input LLVM IR. 9332 static bool processLoopInVPlanNativePath( 9333 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 9334 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 9335 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 9336 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 9337 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 9338 9339 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 9340 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 9341 return false; 9342 } 9343 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 9344 Function *F = L->getHeader()->getParent(); 9345 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 9346 9347 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9348 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 9349 9350 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 9351 &Hints, IAI); 9352 // Use the planner for outer loop vectorization. 9353 // TODO: CM is not used at this point inside the planner. Turn CM into an 9354 // optional argument if we don't need it in the future. 9355 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 9356 9357 // Get user vectorization factor. 9358 ElementCount UserVF = Hints.getWidth(); 9359 9360 // Plan how to best vectorize, return the best VF and its cost. 9361 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 9362 9363 // If we are stress testing VPlan builds, do not attempt to generate vector 9364 // code. Masked vector code generation support will follow soon. 9365 // Also, do not attempt to vectorize if no vector code will be produced. 9366 if (VPlanBuildStressTest || EnableVPlanPredication || 9367 VectorizationFactor::Disabled() == VF) 9368 return false; 9369 9370 LVP.setBestPlan(VF.Width, 1); 9371 9372 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 9373 &CM, BFI, PSI); 9374 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 9375 << L->getHeader()->getParent()->getName() << "\"\n"); 9376 LVP.executePlan(LB, DT); 9377 9378 // Mark the loop as already vectorized to avoid vectorizing again. 9379 Hints.setAlreadyVectorized(); 9380 9381 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9382 return true; 9383 } 9384 9385 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 9386 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 9387 !EnableLoopInterleaving), 9388 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 9389 !EnableLoopVectorization) {} 9390 9391 bool LoopVectorizePass::processLoop(Loop *L) { 9392 assert((EnableVPlanNativePath || L->isInnermost()) && 9393 "VPlan-native path is not enabled. 
Only process inner loops."); 9394 9395 #ifndef NDEBUG 9396 const std::string DebugLocStr = getDebugLocString(L); 9397 #endif /* NDEBUG */ 9398 9399 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 9400 << L->getHeader()->getParent()->getName() << "\" from " 9401 << DebugLocStr << "\n"); 9402 9403 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 9404 9405 LLVM_DEBUG( 9406 dbgs() << "LV: Loop hints:" 9407 << " force=" 9408 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 9409 ? "disabled" 9410 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 9411 ? "enabled" 9412 : "?")) 9413 << " width=" << Hints.getWidth() 9414 << " unroll=" << Hints.getInterleave() << "\n"); 9415 9416 // Function containing loop 9417 Function *F = L->getHeader()->getParent(); 9418 9419 // Looking at the diagnostic output is the only way to determine if a loop 9420 // was vectorized (other than looking at the IR or machine code), so it 9421 // is important to generate an optimization remark for each loop. Most of 9422 // these messages are generated as OptimizationRemarkAnalysis. Remarks 9423 // generated as OptimizationRemark and OptimizationRemarkMissed are 9424 // less verbose reporting vectorized loops and unvectorized loops that may 9425 // benefit from vectorization, respectively. 9426 9427 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 9428 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 9429 return false; 9430 } 9431 9432 PredicatedScalarEvolution PSE(*SE, *L); 9433 9434 // Check if it is legal to vectorize the loop. 9435 LoopVectorizationRequirements Requirements(*ORE); 9436 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 9437 &Requirements, &Hints, DB, AC, BFI, PSI); 9438 if (!LVL.canVectorize(EnableVPlanNativePath)) { 9439 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 9440 Hints.emitRemarkWithHints(); 9441 return false; 9442 } 9443 9444 // Check the function attributes and profiles to find out if this function 9445 // should be optimized for size. 9446 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9447 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 9448 9449 // Entrance to the VPlan-native vectorization path. Outer loops are processed 9450 // here. They may require CFG and instruction level transformations before 9451 // even evaluating whether vectorization is profitable. Since we cannot modify 9452 // the incoming IR, we need to build VPlan upfront in the vectorization 9453 // pipeline. 9454 if (!L->isInnermost()) 9455 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 9456 ORE, BFI, PSI, Hints); 9457 9458 assert(L->isInnermost() && "Inner loop expected."); 9459 9460 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 9461 // count by optimizing for size, to minimize overheads. 9462 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 9463 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 9464 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 9465 << "This loop is worth vectorizing only if no scalar " 9466 << "iteration overheads are incurred."); 9467 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 9468 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 9469 else { 9470 LLVM_DEBUG(dbgs() << "\n"); 9471 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 9472 } 9473 } 9474 9475 // Check the function attributes to see if implicit floats are allowed. 
  // FIXME: This check does not seem correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);

  // Get the user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
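    // In this case the planner produced no plan at all (MaybeVF is None), so
    // neither vectorization nor interleaving will happen; a user-requested
    // interleave count (e.g. via #pragma clang loop interleave_count(N))
    // cannot be honored and is deliberately dropped here.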
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not worthwhile to vectorize the loop, then
    // interleave it.
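    // InnerLoopUnroller is an InnerLoopVectorizer specialized to a vector
    // width of one: executing the plan with it emits IC scalar copies of the
    // loop body per wide iteration, interleaving the loop without introducing
    // any vector instructions.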
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
                               BFI, PSI);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is worthwhile to vectorize the loop, then do it.

    // Consider vectorizing the epilogue too if it's profitable.
    VectorizationFactor EpilogueVF =
        CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
    if (EpilogueVF.Width.isVector()) {

      // The first pass vectorizes the main loop and creates a scalar epilogue
      // to be vectorized by executing the plan (potentially with a different
      // factor) again shortly afterwards.
      EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                        EpilogueVF.Width.getKnownMinValue(), 1);
      EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
                                         &LVL, &CM, BFI, PSI);

      LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
      LVP.executePlan(MainILV, DT);
      ++LoopsVectorized;

      simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
      formLCSSARecursively(*L, *DT, LI, SE);

      // The second pass vectorizes the epilogue and adjusts the control flow
      // edges from the first pass.
      LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
      EPI.MainLoopVF = EPI.EpilogueVF;
      EPI.MainLoopUF = EPI.EpilogueUF;
      EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                               ORE, EPI, &LVL, &CM, BFI, PSI);
      LVP.executePlan(EpilogILV, DT);
      ++LoopsEpilogueVectorized;

      if (!MainILV.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    } else {
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                             &LVL, &CM, BFI, PSI);
      LVP.executePlan(LB, DT);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling a scalar loop when there are
      // no runtime checks about strides and memory. A scalar loop that is
      // rarely used is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    }

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
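    // setAlreadyVectorized() attaches "llvm.loop.isvectorized" loop metadata,
    // which later runs of the vectorizer consult in order to skip this loop.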
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/Dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
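
// Usage sketch (illustrative, not part of the pass itself): with the new pass
// manager the vectorizer can be exercised in isolation via
//
//   opt -passes=loop-vectorize -S input.ll -o output.ll
//
// and the LLVM_DEBUG output above can be enabled in assertion builds with
// -debug-only=loop-vectorize.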