//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path.
// It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
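///
/// For example (illustrative): for a loop whose exit count is not a known
/// SCEV constant but whose branch weights suggest roughly 100 iterations,
/// step 2 would return that profile-based estimate of 100.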
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
        BFI(BFI), PSI(PSI) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           Value *StartV, unsigned UF, ElementCount VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead of
  /// \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start,
                             TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned.
  /// If, however, the value has a scalar entry in VectorLoopValueMap, we
  /// construct a new vector value on-demand by inserting the scalar values
  /// into a vector with an insertelement sequence. If the value has been
  /// neither vectorized nor scalarized, it must be loop invariant, so we
  /// simply broadcast the value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
  }

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...;
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We have already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
  /// otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In
  /// the latter case \p EntryVal is a TruncInst and we must not record anything
  /// for that IV, but it's error-prone to expect callers of this routine to
  /// care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
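/// For example (illustrative): a loop might be vectorized with MainLoopVF = 16
/// and MainLoopUF = 2, while its remaining iterations are handled by an
/// epilogue loop vectorized with EpilogueVF = 8 and EpilogueUF = 1.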
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationLegality *LVL,
                                 llvm::LoopVectorizationCostModel *CM,
                                 BlockFrequencyInfo *BFI,
                                 ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ?
             B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedups/slowdowns due to the supported instruction set. We use
/// the TargetTransformInfo to query the different backends for the cost of
/// different operations.
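/// For example (illustrative): if the target reports that a <4 x i32> add
/// costs roughly the same as a scalar add, the model will tend to favor a
/// vector VF for a loop dominated by i32 adds; if widening an instruction is
/// expensive, the model may instead decide to scalarize it or not vectorize.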
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
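  /// For example (illustrative): passing two VFs (say, a fixed VF of 1 and a
  /// fixed VF of 4) is expected to yield one RegisterUsage entry per requested
  /// VF.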
1301 SmallVector<RegisterUsage, 8> 1302 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1303 1304 /// Collect values we want to ignore in the cost model. 1305 void collectValuesToIgnore(); 1306 1307 /// Split reductions into those that happen in the loop, and those that happen 1308 /// outside. In loop reductions are collected into InLoopReductionChains. 1309 void collectInLoopReductions(); 1310 1311 /// \returns The smallest bitwidth each instruction can be represented with. 1312 /// The vector equivalents of these instructions should be truncated to this 1313 /// type. 1314 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1315 return MinBWs; 1316 } 1317 1318 /// \returns True if it is more profitable to scalarize instruction \p I for 1319 /// vectorization factor \p VF. 1320 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1321 assert(VF.isVector() && 1322 "Profitable to scalarize relevant only for VF > 1."); 1323 1324 // Cost model is not run in the VPlan-native path - return conservative 1325 // result until this changes. 1326 if (EnableVPlanNativePath) 1327 return false; 1328 1329 auto Scalars = InstsToScalarize.find(VF); 1330 assert(Scalars != InstsToScalarize.end() && 1331 "VF not yet analyzed for scalarization profitability"); 1332 return Scalars->second.find(I) != Scalars->second.end(); 1333 } 1334 1335 /// Returns true if \p I is known to be uniform after vectorization. 1336 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1337 if (VF.isScalar()) 1338 return true; 1339 1340 // Cost model is not run in the VPlan-native path - return conservative 1341 // result until this changes. 1342 if (EnableVPlanNativePath) 1343 return false; 1344 1345 auto UniformsPerVF = Uniforms.find(VF); 1346 assert(UniformsPerVF != Uniforms.end() && 1347 "VF not yet analyzed for uniformity"); 1348 return UniformsPerVF->second.count(I); 1349 } 1350 1351 /// Returns true if \p I is known to be scalar after vectorization. 1352 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1353 if (VF.isScalar()) 1354 return true; 1355 1356 // Cost model is not run in the VPlan-native path - return conservative 1357 // result until this changes. 1358 if (EnableVPlanNativePath) 1359 return false; 1360 1361 auto ScalarsPerVF = Scalars.find(VF); 1362 assert(ScalarsPerVF != Scalars.end() && 1363 "Scalar values are not calculated for VF"); 1364 return ScalarsPerVF->second.count(I); 1365 } 1366 1367 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1368 /// for vectorization factor \p VF. 1369 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1370 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1371 !isProfitableToScalarize(I, VF) && 1372 !isScalarAfterVectorization(I, VF); 1373 } 1374 1375 /// Decision that was taken during cost calculation for memory instruction. 1376 enum InstWidening { 1377 CM_Unknown, 1378 CM_Widen, // For consecutive accesses with stride +1. 1379 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1380 CM_Interleave, 1381 CM_GatherScatter, 1382 CM_Scalarize 1383 }; 1384 1385 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1386 /// instruction \p I and vector width \p VF. 
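  /// For example (illustrative values), after costing a consecutive load \p I
  /// at VF = 4, the model would record the decision as
  ///
  ///   setWideningDecision(I, ElementCount::getFixed(4), CM_Widen, LoadCost);
  ///
  /// so that later calls to getWideningDecision(I, VF) return CM_Widen and
  /// getWideningCost(I, VF) returns the cached LoadCost.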
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I,
                               ElementCount VF = ElementCount::getFixed(1));

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
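  /// For example, at VF = 4 a unit-stride load of A[i] can be widened into a
  /// single wide load per unroll part, roughly:
  ///
  ///   %wide.load = load <4 x i32>, <4 x i32>* %ptr, align 4
  ///
  /// (types illustrative), whereas a non-unit stride such as A[2 * i] is
  /// handled as an interleaved access or a gather/scatter instead.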
1542 bool 1543 memoryInstructionCanBeWidened(Instruction *I, 1544 ElementCount VF = ElementCount::getFixed(1)); 1545 1546 /// Returns true if \p I is a memory instruction in an interleaved-group 1547 /// of memory accesses that can be vectorized with wide vector loads/stores 1548 /// and shuffles. 1549 bool 1550 interleavedAccessCanBeWidened(Instruction *I, 1551 ElementCount VF = ElementCount::getFixed(1)); 1552 1553 /// Check if \p Instr belongs to any interleaved access group. 1554 bool isAccessInterleaved(Instruction *Instr) { 1555 return InterleaveInfo.isInterleaved(Instr); 1556 } 1557 1558 /// Get the interleaved access group that \p Instr belongs to. 1559 const InterleaveGroup<Instruction> * 1560 getInterleavedAccessGroup(Instruction *Instr) { 1561 return InterleaveInfo.getInterleaveGroup(Instr); 1562 } 1563 1564 /// Returns true if we're required to use a scalar epilogue for at least 1565 /// the final iteration of the original loop. 1566 bool requiresScalarEpilogue() const { 1567 if (!isScalarEpilogueAllowed()) 1568 return false; 1569 // If we might exit from anywhere but the latch, must run the exiting 1570 // iteration in scalar form. 1571 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1572 return true; 1573 return InterleaveInfo.requiresScalarEpilogue(); 1574 } 1575 1576 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1577 /// loop hint annotation. 1578 bool isScalarEpilogueAllowed() const { 1579 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1580 } 1581 1582 /// Returns true if all loop blocks should be masked to fold tail loop. 1583 bool foldTailByMasking() const { return FoldTailByMasking; } 1584 1585 bool blockNeedsPredication(BasicBlock *BB) { 1586 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1587 } 1588 1589 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1590 /// nodes to the chain of instructions representing the reductions. Uses a 1591 /// MapVector to ensure deterministic iteration order. 1592 using ReductionChainMap = 1593 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1594 1595 /// Return the chain of instructions representing an inloop reduction. 1596 const ReductionChainMap &getInLoopReductionChains() const { 1597 return InLoopReductionChains; 1598 } 1599 1600 /// Returns true if the Phi is part of an inloop reduction. 1601 bool isInLoopReduction(PHINode *Phi) const { 1602 return InLoopReductionChains.count(Phi); 1603 } 1604 1605 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1606 /// with factor VF. Return the cost of the instruction, including 1607 /// scalarization overhead if it's needed. 1608 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF); 1609 1610 /// Estimate cost of a call instruction CI if it were vectorized with factor 1611 /// VF. Return the cost of the instruction, including scalarization overhead 1612 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1613 /// scalarized - 1614 /// i.e. either vector version isn't available, or is too expensive. 1615 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1616 bool &NeedToScalarize); 1617 1618 /// Invalidates decisions already taken by the cost model. 
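  /// After this call the cached sets are empty, so a subsequent
  /// collectUniformsAndScalars(VF) recomputes the widening decisions and the
  /// uniform/scalar sets for the requested VF from scratch.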
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, a power-of-2 larger
  /// than zero. One is returned if vectorization should best be avoided due
  /// to cost.
  ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
                                    ElementCount UserVF);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(ElementCount VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
                                          Type *VectorTy,
                                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup to the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
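  /// As an illustrative example, for a predicated block such as
  ///
  ///   if (cond[i]) { tmp = a[i] + b[i]; dst[i] = tmp / divisor[i]; }
  ///
  /// the division must be scalarized and predicated; if its single-use operand
  /// chain (here the add) is cheaper to compute as scalars than to vectorize
  /// and then extract a lane from, the returned discount is non-negative and
  /// the chain is scalarized together with the division.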
1758 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1759 ElementCount VF); 1760 1761 /// Collect the instructions that are uniform after vectorization. An 1762 /// instruction is uniform if we represent it with a single scalar value in 1763 /// the vectorized loop corresponding to each vector iteration. Examples of 1764 /// uniform instructions include pointer operands of consecutive or 1765 /// interleaved memory accesses. Note that although uniformity implies an 1766 /// instruction will be scalar, the reverse is not true. In general, a 1767 /// scalarized instruction will be represented by VF scalar values in the 1768 /// vectorized loop, each corresponding to an iteration of the original 1769 /// scalar loop. 1770 void collectLoopUniforms(ElementCount VF); 1771 1772 /// Collect the instructions that are scalar after vectorization. An 1773 /// instruction is scalar if it is known to be uniform or will be scalarized 1774 /// during vectorization. Non-uniform scalarized instructions will be 1775 /// represented by VF values in the vectorized loop, each corresponding to an 1776 /// iteration of the original scalar loop. 1777 void collectLoopScalars(ElementCount VF); 1778 1779 /// Keeps cost model vectorization decision and cost for instructions. 1780 /// Right now it is used for memory instructions only. 1781 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1782 std::pair<InstWidening, InstructionCost>>; 1783 1784 DecisionList WideningDecisions; 1785 1786 /// Returns true if \p V is expected to be vectorized and it needs to be 1787 /// extracted. 1788 bool needsExtract(Value *V, ElementCount VF) const { 1789 Instruction *I = dyn_cast<Instruction>(V); 1790 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1791 TheLoop->isLoopInvariant(I)) 1792 return false; 1793 1794 // Assume we can vectorize V (and hence we need extraction) if the 1795 // scalars are not computed yet. This can happen, because it is called 1796 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1797 // the scalars are collected. That should be a safe assumption in most 1798 // cases, because we check if the operands have vectorizable types 1799 // beforehand in LoopVectorizationLegality. 1800 return Scalars.find(VF) == Scalars.end() || 1801 !isScalarAfterVectorization(I, VF); 1802 }; 1803 1804 /// Returns a range containing only operands needing to be extracted. 1805 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1806 ElementCount VF) { 1807 return SmallVector<Value *, 4>(make_filter_range( 1808 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1809 } 1810 1811 /// Determines if we have the infrastructure to vectorize loop \p L and its 1812 /// epilogue, assuming the main loop is vectorized by \p VF. 1813 bool isCandidateForEpilogueVectorization(const Loop &L, 1814 const ElementCount VF) const; 1815 1816 /// Returns true if epilogue vectorization is considered profitable, and 1817 /// false otherwise. 1818 /// \p VF is the vectorization factor chosen for the original loop. 1819 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1820 1821 public: 1822 /// The loop that we evaluate. 1823 Loop *TheLoop; 1824 1825 /// Predicated scalar evolution analysis. 1826 PredicatedScalarEvolution &PSE; 1827 1828 /// Loop Info analysis. 1829 LoopInfo *LI; 1830 1831 /// Vectorization legality. 1832 LoopVectorizationLegality *Legal; 1833 1834 /// Vector target information. 
1835 const TargetTransformInfo &TTI; 1836 1837 /// Target Library Info. 1838 const TargetLibraryInfo *TLI; 1839 1840 /// Demanded bits analysis. 1841 DemandedBits *DB; 1842 1843 /// Assumption cache. 1844 AssumptionCache *AC; 1845 1846 /// Interface to emit optimization remarks. 1847 OptimizationRemarkEmitter *ORE; 1848 1849 const Function *TheFunction; 1850 1851 /// Loop Vectorize Hint. 1852 const LoopVectorizeHints *Hints; 1853 1854 /// The interleave access information contains groups of interleaved accesses 1855 /// with the same stride and close to each other. 1856 InterleavedAccessInfo &InterleaveInfo; 1857 1858 /// Values to ignore in the cost model. 1859 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1860 1861 /// Values to ignore in the cost model when VF > 1. 1862 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1863 1864 /// Profitable vector factors. 1865 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1866 }; 1867 1868 } // end namespace llvm 1869 1870 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1871 // vectorization. The loop needs to be annotated with #pragma omp simd 1872 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1873 // vector length information is not provided, vectorization is not considered 1874 // explicit. Interleave hints are not allowed either. These limitations will be 1875 // relaxed in the future. 1876 // Please, note that we are currently forced to abuse the pragma 'clang 1877 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1878 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1879 // provides *explicit vectorization hints* (LV can bypass legal checks and 1880 // assume that vectorization is legal). However, both hints are implemented 1881 // using the same metadata (llvm.loop.vectorize, processed by 1882 // LoopVectorizeHints). This will be fixed in the future when the native IR 1883 // representation for pragma 'omp simd' is introduced. 1884 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1885 OptimizationRemarkEmitter *ORE) { 1886 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 1887 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1888 1889 // Only outer loops with an explicit vectorization hint are supported. 1890 // Unannotated outer loops are ignored. 1891 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1892 return false; 1893 1894 Function *Fn = OuterLp->getHeader()->getParent(); 1895 if (!Hints.allowVectorization(Fn, OuterLp, 1896 true /*VectorizeOnlyWhenForced*/)) { 1897 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1898 return false; 1899 } 1900 1901 if (Hints.getInterleave() > 1) { 1902 // TODO: Interleave support is future work. 1903 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1904 "outer loops.\n"); 1905 Hints.emitRemarkWithHints(); 1906 return false; 1907 } 1908 1909 return true; 1910 } 1911 1912 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1913 OptimizationRemarkEmitter *ORE, 1914 SmallVectorImpl<Loop *> &V) { 1915 // Collect inner loops and outer loops without irreducible control flow. For 1916 // now, only collect outer loops that have explicit vectorization hints. If we 1917 // are stress testing the VPlan H-CFG construction, we collect the outermost 1918 // loop of every loop nest. 
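  // For example (source-level sketch), an outer loop annotated with
  //
  //   #pragma clang loop vectorize(enable) vectorize_width(4)
  //   for (int i = 0; i < N; ++i)
  //     for (int j = 0; j < M; ++j)
  //       A[i][j] += B[i][j];
  //
  // is only collected here when the VPlan-native path is enabled and
  // isExplicitVecOuterLoop accepts its hints; otherwise only its inner loop is
  // considered for vectorization.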
1919 if (L.isInnermost() || VPlanBuildStressTest || 1920 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1921 LoopBlocksRPO RPOT(&L); 1922 RPOT.perform(LI); 1923 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1924 V.push_back(&L); 1925 // TODO: Collect inner loops inside marked outer loops in case 1926 // vectorization fails for the outer loop. Do not invoke 1927 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1928 // already known to be reducible. We can use an inherited attribute for 1929 // that. 1930 return; 1931 } 1932 } 1933 for (Loop *InnerL : L) 1934 collectSupportedLoops(*InnerL, LI, ORE, V); 1935 } 1936 1937 namespace { 1938 1939 /// The LoopVectorize Pass. 1940 struct LoopVectorize : public FunctionPass { 1941 /// Pass identification, replacement for typeid 1942 static char ID; 1943 1944 LoopVectorizePass Impl; 1945 1946 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1947 bool VectorizeOnlyWhenForced = false) 1948 : FunctionPass(ID), 1949 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1950 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1951 } 1952 1953 bool runOnFunction(Function &F) override { 1954 if (skipFunction(F)) 1955 return false; 1956 1957 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1958 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1959 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1960 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1961 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1962 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1963 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1964 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1965 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1966 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1967 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1968 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1969 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1970 1971 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1972 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1973 1974 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1975 GetLAA, *ORE, PSI).MadeAnyChange; 1976 } 1977 1978 void getAnalysisUsage(AnalysisUsage &AU) const override { 1979 AU.addRequired<AssumptionCacheTracker>(); 1980 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1981 AU.addRequired<DominatorTreeWrapperPass>(); 1982 AU.addRequired<LoopInfoWrapperPass>(); 1983 AU.addRequired<ScalarEvolutionWrapperPass>(); 1984 AU.addRequired<TargetTransformInfoWrapperPass>(); 1985 AU.addRequired<AAResultsWrapperPass>(); 1986 AU.addRequired<LoopAccessLegacyAnalysis>(); 1987 AU.addRequired<DemandedBitsWrapperPass>(); 1988 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1989 AU.addRequired<InjectTLIMappingsLegacy>(); 1990 1991 // We currently do not preserve loopinfo/dominator analyses with outer loop 1992 // vectorization. Until this is addressed, mark these analyses as preserved 1993 // only for non-VPlan-native path. 1994 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1995 if (!EnableVPlanNativePath) { 1996 AU.addPreserved<LoopInfoWrapperPass>(); 1997 AU.addPreserved<DominatorTreeWrapperPass>(); 1998 } 1999 2000 AU.addPreserved<BasicAAWrapperPass>(); 2001 AU.addPreserved<GlobalsAAWrapperPass>(); 2002 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2003 } 2004 }; 2005 2006 } // end anonymous namespace 2007 2008 //===----------------------------------------------------------------------===// 2009 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2010 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2011 //===----------------------------------------------------------------------===// 2012 2013 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2014 // We need to place the broadcast of invariant variables outside the loop, 2015 // but only if it's proven safe to do so. Else, broadcast will be inside 2016 // vector loop body. 2017 Instruction *Instr = dyn_cast<Instruction>(V); 2018 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2019 (!Instr || 2020 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2021 // Place the code for broadcasting invariant variables in the new preheader. 2022 IRBuilder<>::InsertPointGuard Guard(Builder); 2023 if (SafeToHoist) 2024 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2025 2026 // Broadcast the scalar into all locations in the vector. 2027 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2028 2029 return Shuf; 2030 } 2031 2032 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2033 const InductionDescriptor &II, Value *Step, Value *Start, 2034 Instruction *EntryVal) { 2035 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2036 "Expected either an induction phi-node or a truncate of it!"); 2037 2038 // Construct the initial value of the vector IV in the vector loop preheader 2039 auto CurrIP = Builder.saveIP(); 2040 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2041 if (isa<TruncInst>(EntryVal)) { 2042 assert(Start->getType()->isIntegerTy() && 2043 "Truncation requires an integer type"); 2044 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2045 Step = Builder.CreateTrunc(Step, TruncType); 2046 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2047 } 2048 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2049 Value *SteppedStart = 2050 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2051 2052 // We create vector phi nodes for both integer and floating-point induction 2053 // variables. Here, we determine the kind of arithmetic we will perform. 2054 Instruction::BinaryOps AddOp; 2055 Instruction::BinaryOps MulOp; 2056 if (Step->getType()->isIntegerTy()) { 2057 AddOp = Instruction::Add; 2058 MulOp = Instruction::Mul; 2059 } else { 2060 AddOp = II.getInductionOpcode(); 2061 MulOp = Instruction::FMul; 2062 } 2063 2064 // Multiply the vectorization factor by the step using integer or 2065 // floating-point arithmetic as appropriate. 2066 Value *ConstVF = 2067 getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue()); 2068 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2069 2070 // Create a vector splat to use in the induction update. 2071 // 2072 // FIXME: If the step is non-constant, we create the vector splat with 2073 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2074 // handle a constant vector splat. 
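  // For example (illustrative values), with VF = 4 and a constant step of 2,
  // Mul folds to the constant 8 and SplatVF below becomes the constant vector
  // <i32 8, i32 8, i32 8, i32 8>, which is added to the vector IV once per
  // unrolled part.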
2075 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2076 Value *SplatVF = isa<Constant>(Mul) 2077 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2078 : Builder.CreateVectorSplat(VF, Mul); 2079 Builder.restoreIP(CurrIP); 2080 2081 // We may need to add the step a number of times, depending on the unroll 2082 // factor. The last of those goes into the PHI. 2083 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2084 &*LoopVectorBody->getFirstInsertionPt()); 2085 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2086 Instruction *LastInduction = VecInd; 2087 for (unsigned Part = 0; Part < UF; ++Part) { 2088 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 2089 2090 if (isa<TruncInst>(EntryVal)) 2091 addMetadata(LastInduction, EntryVal); 2092 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 2093 2094 LastInduction = cast<Instruction>(addFastMathFlag( 2095 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 2096 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2097 } 2098 2099 // Move the last step to the end of the latch block. This ensures consistent 2100 // placement of all induction updates. 2101 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2102 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2103 auto *ICmp = cast<Instruction>(Br->getCondition()); 2104 LastInduction->moveBefore(ICmp); 2105 LastInduction->setName("vec.ind.next"); 2106 2107 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2108 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2109 } 2110 2111 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2112 return Cost->isScalarAfterVectorization(I, VF) || 2113 Cost->isProfitableToScalarize(I, VF); 2114 } 2115 2116 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2117 if (shouldScalarizeInstruction(IV)) 2118 return true; 2119 auto isScalarInst = [&](User *U) -> bool { 2120 auto *I = cast<Instruction>(U); 2121 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2122 }; 2123 return llvm::any_of(IV->users(), isScalarInst); 2124 } 2125 2126 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2127 const InductionDescriptor &ID, const Instruction *EntryVal, 2128 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 2129 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2130 "Expected either an induction phi-node or a truncate of it!"); 2131 2132 // This induction variable is not the phi from the original loop but the 2133 // newly-created IV based on the proof that casted Phi is equal to the 2134 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2135 // re-uses the same InductionDescriptor that original IV uses but we don't 2136 // have to do any recording in this case - that is done when original IV is 2137 // processed. 2138 if (isa<TruncInst>(EntryVal)) 2139 return; 2140 2141 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2142 if (Casts.empty()) 2143 return; 2144 // Only the first Cast instruction in the Casts vector is of interest. 2145 // The rest of the Casts (if exist) have no uses outside the 2146 // induction update chain itself. 
2147 Instruction *CastInst = *Casts.begin(); 2148 if (Lane < UINT_MAX) 2149 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 2150 else 2151 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 2152 } 2153 2154 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2155 TruncInst *Trunc) { 2156 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2157 "Primary induction variable must have an integer type"); 2158 2159 auto II = Legal->getInductionVars().find(IV); 2160 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2161 2162 auto ID = II->second; 2163 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2164 2165 // The value from the original loop to which we are mapping the new induction 2166 // variable. 2167 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2168 2169 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2170 2171 // Generate code for the induction step. Note that induction steps are 2172 // required to be loop-invariant 2173 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2174 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2175 "Induction step should be loop invariant"); 2176 if (PSE.getSE()->isSCEVable(IV->getType())) { 2177 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2178 return Exp.expandCodeFor(Step, Step->getType(), 2179 LoopVectorPreHeader->getTerminator()); 2180 } 2181 return cast<SCEVUnknown>(Step)->getValue(); 2182 }; 2183 2184 // The scalar value to broadcast. This is derived from the canonical 2185 // induction variable. If a truncation type is given, truncate the canonical 2186 // induction variable and step. Otherwise, derive these values from the 2187 // induction descriptor. 2188 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2189 Value *ScalarIV = Induction; 2190 if (IV != OldInduction) { 2191 ScalarIV = IV->getType()->isIntegerTy() 2192 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2193 : Builder.CreateCast(Instruction::SIToFP, Induction, 2194 IV->getType()); 2195 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2196 ScalarIV->setName("offset.idx"); 2197 } 2198 if (Trunc) { 2199 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2200 assert(Step->getType()->isIntegerTy() && 2201 "Truncation requires an integer step"); 2202 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2203 Step = Builder.CreateTrunc(Step, TruncType); 2204 } 2205 return ScalarIV; 2206 }; 2207 2208 // Create the vector values from the scalar IV, in the absence of creating a 2209 // vector IV. 2210 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2211 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2212 for (unsigned Part = 0; Part < UF; ++Part) { 2213 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2214 Value *EntryPart = 2215 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2216 ID.getInductionOpcode()); 2217 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 2218 if (Trunc) 2219 addMetadata(EntryPart, Trunc); 2220 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 2221 } 2222 }; 2223 2224 // Now do the actual transformations, and start with creating the step value. 
2225 Value *Step = CreateStepValue(ID.getStep()); 2226 if (VF.isZero() || VF.isScalar()) { 2227 Value *ScalarIV = CreateScalarIV(Step); 2228 CreateSplatIV(ScalarIV, Step); 2229 return; 2230 } 2231 2232 // Determine if we want a scalar version of the induction variable. This is 2233 // true if the induction variable itself is not widened, or if it has at 2234 // least one user in the loop that is not widened. 2235 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2236 if (!NeedsScalarIV) { 2237 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal); 2238 return; 2239 } 2240 2241 // Try to create a new independent vector induction variable. If we can't 2242 // create the phi node, we will splat the scalar induction variable in each 2243 // loop iteration. 2244 if (!shouldScalarizeInstruction(EntryVal)) { 2245 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal); 2246 Value *ScalarIV = CreateScalarIV(Step); 2247 // Create scalar steps that can be used by instructions we will later 2248 // scalarize. Note that the addition of the scalar steps will not increase 2249 // the number of instructions in the loop in the common case prior to 2250 // InstCombine. We will be trading one vector extract for each scalar step. 2251 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2252 return; 2253 } 2254 2255 // All IV users are scalar instructions, so only emit a scalar IV, not a 2256 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2257 // predicate used by the masked loads/stores. 2258 Value *ScalarIV = CreateScalarIV(Step); 2259 if (!Cost->isScalarEpilogueAllowed()) 2260 CreateSplatIV(ScalarIV, Step); 2261 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2262 } 2263 2264 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2265 Instruction::BinaryOps BinOp) { 2266 // Create and check the types. 2267 auto *ValVTy = cast<FixedVectorType>(Val->getType()); 2268 int VLen = ValVTy->getNumElements(); 2269 2270 Type *STy = Val->getType()->getScalarType(); 2271 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2272 "Induction Step must be an integer or FP"); 2273 assert(Step->getType() == STy && "Step has wrong type"); 2274 2275 SmallVector<Constant *, 8> Indices; 2276 2277 if (STy->isIntegerTy()) { 2278 // Create a vector of consecutive numbers from zero to VF. 2279 for (int i = 0; i < VLen; ++i) 2280 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2281 2282 // Add the consecutive indices to the vector value. 2283 Constant *Cv = ConstantVector::get(Indices); 2284 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2285 Step = Builder.CreateVectorSplat(VLen, Step); 2286 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2287 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2288 // which can be found from the original scalar operations. 2289 Step = Builder.CreateMul(Cv, Step); 2290 return Builder.CreateAdd(Val, Step, "induction"); 2291 } 2292 2293 // Floating point induction. 2294 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2295 "Binary Opcode should be specified for FP induction"); 2296 // Create a vector of consecutive numbers from zero to VF. 2297 for (int i = 0; i < VLen; ++i) 2298 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2299 2300 // Add the consecutive indices to the vector value. 
2301 Constant *Cv = ConstantVector::get(Indices); 2302 2303 Step = Builder.CreateVectorSplat(VLen, Step); 2304 2305 // Floating point operations had to be 'fast' to enable the induction. 2306 FastMathFlags Flags; 2307 Flags.setFast(); 2308 2309 Value *MulOp = Builder.CreateFMul(Cv, Step); 2310 if (isa<Instruction>(MulOp)) 2311 // Have to check, MulOp may be a constant 2312 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2313 2314 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2315 if (isa<Instruction>(BOp)) 2316 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2317 return BOp; 2318 } 2319 2320 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2321 Instruction *EntryVal, 2322 const InductionDescriptor &ID) { 2323 // We shouldn't have to build scalar steps if we aren't vectorizing. 2324 assert(VF.isVector() && "VF should be greater than one"); 2325 // Get the value type and ensure it and the step have the same integer type. 2326 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2327 assert(ScalarIVTy == Step->getType() && 2328 "Val and Step should have the same type"); 2329 2330 // We build scalar steps for both integer and floating-point induction 2331 // variables. Here, we determine the kind of arithmetic we will perform. 2332 Instruction::BinaryOps AddOp; 2333 Instruction::BinaryOps MulOp; 2334 if (ScalarIVTy->isIntegerTy()) { 2335 AddOp = Instruction::Add; 2336 MulOp = Instruction::Mul; 2337 } else { 2338 AddOp = ID.getInductionOpcode(); 2339 MulOp = Instruction::FMul; 2340 } 2341 2342 // Determine the number of scalars we need to generate for each unroll 2343 // iteration. If EntryVal is uniform, we only need to generate the first 2344 // lane. Otherwise, we generate all VF values. 2345 unsigned Lanes = 2346 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) 2347 ? 1 2348 : VF.getKnownMinValue(); 2349 assert((!VF.isScalable() || Lanes == 1) && 2350 "Should never scalarize a scalable vector"); 2351 // Compute the scalar steps and save the results in VectorLoopValueMap. 2352 for (unsigned Part = 0; Part < UF; ++Part) { 2353 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2354 auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2355 ScalarIVTy->getScalarSizeInBits()); 2356 Value *StartIdx = 2357 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2358 if (ScalarIVTy->isFloatingPointTy()) 2359 StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy); 2360 StartIdx = addFastMathFlag(Builder.CreateBinOp( 2361 AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane))); 2362 // The step returned by `createStepForVF` is a runtime-evaluated value 2363 // when VF is scalable. Otherwise, it should be folded into a Constant. 
2364 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2365 "Expected StartIdx to be folded to a constant when VF is not " 2366 "scalable"); 2367 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2368 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2369 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2370 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2371 } 2372 } 2373 } 2374 2375 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2376 assert(V != Induction && "The new induction variable should not be used."); 2377 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2378 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2379 2380 // If we have a stride that is replaced by one, do it here. Defer this for 2381 // the VPlan-native path until we start running Legal checks in that path. 2382 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2383 V = ConstantInt::get(V->getType(), 1); 2384 2385 // If we have a vector mapped to this value, return it. 2386 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2387 return VectorLoopValueMap.getVectorValue(V, Part); 2388 2389 // If the value has not been vectorized, check if it has been scalarized 2390 // instead. If it has been scalarized, and we actually need the value in 2391 // vector form, we will construct the vector values on demand. 2392 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2393 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2394 2395 // If we've scalarized a value, that value should be an instruction. 2396 auto *I = cast<Instruction>(V); 2397 2398 // If we aren't vectorizing, we can just copy the scalar map values over to 2399 // the vector map. 2400 if (VF.isScalar()) { 2401 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2402 return ScalarValue; 2403 } 2404 2405 // Get the last scalar instruction we generated for V and Part. If the value 2406 // is known to be uniform after vectorization, this corresponds to lane zero 2407 // of the Part unroll iteration. Otherwise, the last instruction is the one 2408 // we created for the last vector lane of the Part unroll iteration. 2409 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) 2410 ? 0 2411 : VF.getKnownMinValue() - 1; 2412 assert((!VF.isScalable() || LastLane == 0) && 2413 "Scalable vectorization can't lead to any scalarized values."); 2414 auto *LastInst = cast<Instruction>( 2415 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2416 2417 // Set the insert point after the last scalarized instruction. This ensures 2418 // the insertelement sequence will directly follow the scalar definitions. 2419 auto OldIP = Builder.saveIP(); 2420 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2421 Builder.SetInsertPoint(&*NewIP); 2422 2423 // However, if we are vectorizing, we need to construct the vector values. 2424 // If the value is known to be uniform after vectorization, we can just 2425 // broadcast the scalar value corresponding to lane zero for each unroll 2426 // iteration. Otherwise, we construct the vector values using insertelement 2427 // instructions. Since the resulting vectors are stored in 2428 // VectorLoopValueMap, we will only generate the insertelements once. 
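    // In the non-uniform case at VF = 4 this emits, roughly:
    //
    //   %v0 = insertelement <4 x i32> poison, i32 %s0, i32 0
    //   %v1 = insertelement <4 x i32> %v0,    i32 %s1, i32 1
    //   %v2 = insertelement <4 x i32> %v1,    i32 %s2, i32 2
    //   %v3 = insertelement <4 x i32> %v2,    i32 %s3, i32 3
    //
    // where %s0..%s3 are the previously generated scalar values for this part
    // (types illustrative).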
2429 Value *VectorValue = nullptr; 2430 if (Cost->isUniformAfterVectorization(I, VF)) { 2431 VectorValue = getBroadcastInstrs(ScalarValue); 2432 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2433 } else { 2434 // Initialize packing with insertelements to start from poison. 2435 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2436 Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF)); 2437 VectorLoopValueMap.setVectorValue(V, Part, Poison); 2438 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 2439 packScalarIntoVectorValue(V, {Part, Lane}); 2440 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2441 } 2442 Builder.restoreIP(OldIP); 2443 return VectorValue; 2444 } 2445 2446 // If this scalar is unknown, assume that it is a constant or that it is 2447 // loop invariant. Broadcast V and save the value for future uses. 2448 Value *B = getBroadcastInstrs(V); 2449 VectorLoopValueMap.setVectorValue(V, Part, B); 2450 return B; 2451 } 2452 2453 Value * 2454 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2455 const VPIteration &Instance) { 2456 // If the value is not an instruction contained in the loop, it should 2457 // already be scalar. 2458 if (OrigLoop->isLoopInvariant(V)) 2459 return V; 2460 2461 assert(Instance.Lane > 0 2462 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2463 : true && "Uniform values only have lane zero"); 2464 2465 // If the value from the original loop has not been vectorized, it is 2466 // represented by UF x VF scalar values in the new loop. Return the requested 2467 // scalar value. 2468 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2469 return VectorLoopValueMap.getScalarValue(V, Instance); 2470 2471 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2472 // for the given unroll part. If this entry is not a vector type (i.e., the 2473 // vectorization factor is one), there is no need to generate an 2474 // extractelement instruction. 2475 auto *U = getOrCreateVectorValue(V, Instance.Part); 2476 if (!U->getType()->isVectorTy()) { 2477 assert(VF.isScalar() && "Value not scalarized has non-vector type"); 2478 return U; 2479 } 2480 2481 // Otherwise, the value from the original loop has been vectorized and is 2482 // represented by UF vector values. Extract and return the requested scalar 2483 // value from the appropriate vector lane. 
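  // E.g. requesting lane 2 of a <4 x i32> part yields, roughly,
  //
  //   %s = extractelement <4 x i32> %vec.part, i32 2
  //
  // where the lane index comes from the requested VPIteration (types
  // illustrative).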
2484 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2485 } 2486 2487 void InnerLoopVectorizer::packScalarIntoVectorValue( 2488 Value *V, const VPIteration &Instance) { 2489 assert(V != Induction && "The new induction variable should not be used."); 2490 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2491 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2492 2493 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2494 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2495 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2496 Builder.getInt32(Instance.Lane)); 2497 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2498 } 2499 2500 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2501 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2502 assert(!VF.isScalable() && "Cannot reverse scalable vectors"); 2503 SmallVector<int, 8> ShuffleMask; 2504 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 2505 ShuffleMask.push_back(VF.getKnownMinValue() - i - 1); 2506 2507 return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse"); 2508 } 2509 2510 // Return whether we allow using masked interleave-groups (for dealing with 2511 // strided loads/stores that reside in predicated blocks, or for dealing 2512 // with gaps). 2513 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2514 // If an override option has been passed in for interleaved accesses, use it. 2515 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2516 return EnableMaskedInterleavedMemAccesses; 2517 2518 return TTI.enableMaskedInterleavedAccessVectorization(); 2519 } 2520 2521 // Try to vectorize the interleave group that \p Instr belongs to. 2522 // 2523 // E.g. Translate following interleaved load group (factor = 3): 2524 // for (i = 0; i < N; i+=3) { 2525 // R = Pic[i]; // Member of index 0 2526 // G = Pic[i+1]; // Member of index 1 2527 // B = Pic[i+2]; // Member of index 2 2528 // ... // do something to R, G, B 2529 // } 2530 // To: 2531 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2532 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2533 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2534 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2535 // 2536 // Or translate following interleaved store group (factor = 3): 2537 // for (i = 0; i < N; i+=3) { 2538 // ... do something to R, G, B 2539 // Pic[i] = R; // Member of index 0 2540 // Pic[i+1] = G; // Member of index 1 2541 // Pic[i+2] = B; // Member of index 2 2542 // } 2543 // To: 2544 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2545 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2546 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2547 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2548 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2549 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2550 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2551 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2552 VPValue *BlockInMask) { 2553 Instruction *Instr = Group->getInsertPos(); 2554 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2555 2556 // Prepare for the vector type of the interleaved load/store. 
  Type *ScalarTy = getMemInstValueType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  assert(!VF.isScalable() &&
         "scalable vector reverse operation is not implemented");
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, {Part, 0});
    setDebugLocFromInst(Builder, AddrPart);

    // Note that the current instruction could be at any index in the group;
    // we need to adjust the address to that of the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
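    // When the access is predicated, the per-lane block mask is replicated
    // across the interleave factor first; e.g. with VF = 4 and factor 3 a
    // block mask <m0, m1, m2, m3> becomes, roughly,
    //
    //   <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>
    //
    // and is then combined with any gap mask before feeding the masked load.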
2622 SmallVector<Value *, 2> NewLoads; 2623 for (unsigned Part = 0; Part < UF; Part++) { 2624 Instruction *NewLoad; 2625 if (BlockInMask || MaskForGaps) { 2626 assert(useMaskedInterleavedAccesses(*TTI) && 2627 "masked interleaved groups are not allowed."); 2628 Value *GroupMask = MaskForGaps; 2629 if (BlockInMask) { 2630 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2631 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2632 Value *ShuffledMask = Builder.CreateShuffleVector( 2633 BlockInMaskPart, 2634 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2635 "interleaved.mask"); 2636 GroupMask = MaskForGaps 2637 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2638 MaskForGaps) 2639 : ShuffledMask; 2640 } 2641 NewLoad = 2642 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2643 GroupMask, PoisonVec, "wide.masked.vec"); 2644 } 2645 else 2646 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2647 Group->getAlign(), "wide.vec"); 2648 Group->addMetadata(NewLoad); 2649 NewLoads.push_back(NewLoad); 2650 } 2651 2652 // For each member in the group, shuffle out the appropriate data from the 2653 // wide loads. 2654 unsigned J = 0; 2655 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2656 Instruction *Member = Group->getMember(I); 2657 2658 // Skip the gaps in the group. 2659 if (!Member) 2660 continue; 2661 2662 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2663 auto StrideMask = 2664 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2665 for (unsigned Part = 0; Part < UF; Part++) { 2666 Value *StridedVec = Builder.CreateShuffleVector( 2667 NewLoads[Part], StrideMask, "strided.vec"); 2668 2669 // If this member has different type, cast the result type. 2670 if (Member->getType() != ScalarTy) { 2671 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2672 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2673 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2674 } 2675 2676 if (Group->isReverse()) 2677 StridedVec = reverseVector(StridedVec); 2678 2679 State.set(VPDefs[J], Member, StridedVec, Part); 2680 } 2681 ++J; 2682 } 2683 return; 2684 } 2685 2686 // The sub vector type for current instruction. 2687 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2688 auto *SubVT = VectorType::get(ScalarTy, VF); 2689 2690 // Vectorize the interleaved store group. 2691 for (unsigned Part = 0; Part < UF; Part++) { 2692 // Collect the stored vector from each member. 2693 SmallVector<Value *, 4> StoredVecs; 2694 for (unsigned i = 0; i < InterleaveFactor; i++) { 2695 // Interleaved store group doesn't allow a gap, so each index has a member 2696 assert(Group->getMember(i) && "Fail to get a member from an interleaved store group"); 2697 2698 Value *StoredVec = State.get(StoredValues[i], Part); 2699 2700 if (Group->isReverse()) 2701 StoredVec = reverseVector(StoredVec); 2702 2703 // If this member has different type, cast it to a unified type. 2704 2705 if (StoredVec->getType() != SubVT) 2706 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2707 2708 StoredVecs.push_back(StoredVec); 2709 } 2710 2711 // Concatenate all vectors into a wide vector. 2712 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2713 2714 // Interleave the elements in the wide vector. 
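    // E.g., for VF = 4 and an interleave factor of 3, createInterleaveMask
    // below produces <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>, matching the
    // store example in the comment above (illustrative).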
2715 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2716 Value *IVec = Builder.CreateShuffleVector( 2717 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2718 "interleaved.vec"); 2719 2720 Instruction *NewStoreInstr; 2721 if (BlockInMask) { 2722 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2723 Value *ShuffledMask = Builder.CreateShuffleVector( 2724 BlockInMaskPart, 2725 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2726 "interleaved.mask"); 2727 NewStoreInstr = Builder.CreateMaskedStore( 2728 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2729 } 2730 else 2731 NewStoreInstr = 2732 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2733 2734 Group->addMetadata(NewStoreInstr); 2735 } 2736 } 2737 2738 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2739 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2740 VPValue *StoredValue, VPValue *BlockInMask) { 2741 // Attempt to issue a wide load. 2742 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2743 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2744 2745 assert((LI || SI) && "Invalid Load/Store instruction"); 2746 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2747 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2748 2749 LoopVectorizationCostModel::InstWidening Decision = 2750 Cost->getWideningDecision(Instr, VF); 2751 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2752 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2753 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2754 "CM decision is not to widen the memory instruction"); 2755 2756 Type *ScalarDataTy = getMemInstValueType(Instr); 2757 2758 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2759 const Align Alignment = getLoadStoreAlignment(Instr); 2760 2761 // Determine if the pointer operand of the access is either consecutive or 2762 // reverse consecutive. 2763 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2764 bool ConsecutiveStride = 2765 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2766 bool CreateGatherScatter = 2767 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2768 2769 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2770 // gather/scatter. Otherwise Decision should have been to Scalarize. 2771 assert((ConsecutiveStride || CreateGatherScatter) && 2772 "The instruction should be scalarized"); 2773 (void)ConsecutiveStride; 2774 2775 VectorParts BlockInMaskParts(UF); 2776 bool isMaskRequired = BlockInMask; 2777 if (isMaskRequired) 2778 for (unsigned Part = 0; Part < UF; ++Part) 2779 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2780 2781 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2782 // Calculate the pointer for the specific unroll-part. 2783 GetElementPtrInst *PartPtr = nullptr; 2784 2785 bool InBounds = false; 2786 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2787 InBounds = gep->isInBounds(); 2788 2789 if (Reverse) { 2790 assert(!VF.isScalable() && 2791 "Reversing vectors is not yet supported for scalable vectors."); 2792 2793 // If the address is consecutive but reversed, then the 2794 // wide store needs to start at the last vector element. 
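      // E.g., for VF = 4 the two GEPs below offset the pointer by -Part * 4
      // and then by -3, so Part 0 accesses elements [-3, 0] and Part 1
      // accesses [-7, -4]; each loaded or stored vector is then reversed
      // (illustrative).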
2795 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2796 ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue()))); 2797 PartPtr->setIsInBounds(InBounds); 2798 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2799 ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue()))); 2800 PartPtr->setIsInBounds(InBounds); 2801 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2802 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2803 } else { 2804 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2805 PartPtr = cast<GetElementPtrInst>( 2806 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2807 PartPtr->setIsInBounds(InBounds); 2808 } 2809 2810 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2811 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2812 }; 2813 2814 // Handle Stores: 2815 if (SI) { 2816 setDebugLocFromInst(Builder, SI); 2817 2818 for (unsigned Part = 0; Part < UF; ++Part) { 2819 Instruction *NewSI = nullptr; 2820 Value *StoredVal = State.get(StoredValue, Part); 2821 if (CreateGatherScatter) { 2822 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2823 Value *VectorGep = State.get(Addr, Part); 2824 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2825 MaskPart); 2826 } else { 2827 if (Reverse) { 2828 // If we store to reverse consecutive memory locations, then we need 2829 // to reverse the order of elements in the stored value. 2830 StoredVal = reverseVector(StoredVal); 2831 // We don't want to update the value in the map as it might be used in 2832 // another expression. So don't call resetVectorValue(StoredVal). 2833 } 2834 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2835 if (isMaskRequired) 2836 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2837 BlockInMaskParts[Part]); 2838 else 2839 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2840 } 2841 addMetadata(NewSI, SI); 2842 } 2843 return; 2844 } 2845 2846 // Handle loads. 2847 assert(LI && "Must have a load instruction"); 2848 setDebugLocFromInst(Builder, LI); 2849 for (unsigned Part = 0; Part < UF; ++Part) { 2850 Value *NewLI; 2851 if (CreateGatherScatter) { 2852 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2853 Value *VectorGep = State.get(Addr, Part); 2854 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2855 nullptr, "wide.masked.gather"); 2856 addMetadata(NewLI, LI); 2857 } else { 2858 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2859 if (isMaskRequired) 2860 NewLI = Builder.CreateMaskedLoad( 2861 VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), 2862 "wide.masked.load"); 2863 else 2864 NewLI = 2865 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2866 2867 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2868 addMetadata(NewLI, LI); 2869 if (Reverse) 2870 NewLI = reverseVector(NewLI); 2871 } 2872 2873 State.set(Def, Instr, NewLI, Part); 2874 } 2875 } 2876 2877 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User, 2878 const VPIteration &Instance, 2879 bool IfPredicateInstr, 2880 VPTransformState &State) { 2881 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2882 2883 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2884 // the first lane and part. 
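  // E.g., a predicated scalar instruction replicated with VF = 4 and UF = 2 is
  // typically cloned once per (part, lane) pair, but an
  // llvm.experimental.noalias.scope.decl is only emitted for lane 0 of part 0
  // (illustrative sketch of the check below).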
2885   if (auto *II = dyn_cast<IntrinsicInst>(Instr))
2886     if (Instance.Lane != 0 || Instance.Part != 0)
2887       if (II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
2888         return;
2889
2890   setDebugLocFromInst(Builder, Instr);
2891
2892   // Does this instruction return a value?
2893   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2894
2895   Instruction *Cloned = Instr->clone();
2896   if (!IsVoidRetTy)
2897     Cloned->setName(Instr->getName() + ".cloned");
2898
2899   // Replace the operands of the cloned instruction with their scalar
2900   // equivalents in the new loop.
2901   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2902     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2903     auto InputInstance = Instance;
2904     if (!Operand || !OrigLoop->contains(Operand) ||
2905         (Cost->isUniformAfterVectorization(Operand, State.VF)))
2906       InputInstance.Lane = 0;
2907     auto *NewOp = State.get(User.getOperand(op), InputInstance);
2908     Cloned->setOperand(op, NewOp);
2909   }
2910   addNewMetadata(Cloned, Instr);
2911
2912   // Place the cloned scalar in the new loop.
2913   Builder.Insert(Cloned);
2914
2915   // TODO: Set result for VPValue of VPReplicateRecipe. This requires
2916   // representing scalar values in VPTransformState. Add the cloned scalar to
2917   // the scalar map entry.
2918   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2919
2920   // If we just cloned a new assumption, add it to the assumption cache.
2921   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2922     if (II->getIntrinsicID() == Intrinsic::assume)
2923       AC->registerAssumption(II);
2924
2925   // End if-block.
2926   if (IfPredicateInstr)
2927     PredicatedInstructions.push_back(Cloned);
2928 }
2929
2930 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2931                                                       Value *End, Value *Step,
2932                                                       Instruction *DL) {
2933   BasicBlock *Header = L->getHeader();
2934   BasicBlock *Latch = L->getLoopLatch();
2935   // As we're just creating this loop, it's possible no latch exists
2936   // yet. If so, use the header as this will be a single block loop.
2937   if (!Latch)
2938     Latch = Header;
2939
2940   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2941   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2942   setDebugLocFromInst(Builder, OldInst);
2943   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2944
2945   Builder.SetInsertPoint(Latch->getTerminator());
2946   setDebugLocFromInst(Builder, OldInst);
2947
2948   // Create i+1 and fill the PHINode.
2949   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2950   Induction->addIncoming(Start, L->getLoopPreheader());
2951   Induction->addIncoming(Next, Latch);
2952   // Create the compare.
2953   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2954   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
2955
2956   // Now we have two terminators. Remove the old one from the block.
2957   Latch->getTerminator()->eraseFromParent();
2958
2959   return Induction;
2960 }
2961
2962 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2963   if (TripCount)
2964     return TripCount;
2965
2966   assert(L && "Create Trip Count for null loop.");
2967   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2968   // Find the loop boundaries.
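  // E.g., for a loop running i = 0 .. 99 the backedge-taken count computed
  // below is 99 and the resulting trip count is 99 + 1 = 100 (illustrative).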
2969 ScalarEvolution *SE = PSE.getSE(); 2970 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2971 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2972 "Invalid loop count"); 2973 2974 Type *IdxTy = Legal->getWidestInductionType(); 2975 assert(IdxTy && "No type for induction"); 2976 2977 // The exit count might have the type of i64 while the phi is i32. This can 2978 // happen if we have an induction variable that is sign extended before the 2979 // compare. The only way that we get a backedge taken count is that the 2980 // induction variable was signed and as such will not overflow. In such a case 2981 // truncation is legal. 2982 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2983 IdxTy->getPrimitiveSizeInBits()) 2984 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2985 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2986 2987 // Get the total trip count from the count by adding 1. 2988 const SCEV *ExitCount = SE->getAddExpr( 2989 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2990 2991 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2992 2993 // Expand the trip count and place the new instructions in the preheader. 2994 // Notice that the pre-header does not change, only the loop body. 2995 SCEVExpander Exp(*SE, DL, "induction"); 2996 2997 // Count holds the overall loop count (N). 2998 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2999 L->getLoopPreheader()->getTerminator()); 3000 3001 if (TripCount->getType()->isPointerTy()) 3002 TripCount = 3003 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3004 L->getLoopPreheader()->getTerminator()); 3005 3006 return TripCount; 3007 } 3008 3009 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3010 if (VectorTripCount) 3011 return VectorTripCount; 3012 3013 Value *TC = getOrCreateTripCount(L); 3014 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3015 3016 Type *Ty = TC->getType(); 3017 // This is where we can make the step a runtime constant. 3018 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3019 3020 // If the tail is to be folded by masking, round the number of iterations N 3021 // up to a multiple of Step instead of rounding down. This is done by first 3022 // adding Step-1 and then rounding down. Note that it's ok if this addition 3023 // overflows: the vector induction variable will eventually wrap to zero given 3024 // that it starts at zero and its Step is a power of two; the loop will then 3025 // exit, with the last early-exit vector comparison also producing all-true. 3026 if (Cost->foldTailByMasking()) { 3027 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3028 "VF*UF must be a power of 2 when folding tail by masking"); 3029 assert(!VF.isScalable() && 3030 "Tail folding not yet supported for scalable vectors"); 3031 TC = Builder.CreateAdd( 3032 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3033 } 3034 3035 // Now we need to generate the expression for the part of the loop that the 3036 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3037 // iterations are not required for correctness, or N - Step, otherwise. Step 3038 // is equal to the vectorization factor (number of SIMD elements) times the 3039 // unroll factor (number of SIMD instructions). 
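  // For example (illustrative values): with VF = 4 and UF = 2, Step is 8. A
  // trip count N = 20 gives n.mod.vf = 4 and n.vec = 16, leaving 4 scalar
  // iterations. If a scalar epilogue is required and N = 16, the remainder is
  // bumped from 0 to 8 below, so n.vec = 8. With tail folding, N = 20 is first
  // rounded up so that n.vec = 24 and the vector loop covers all iterations
  // under a mask.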
3040 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3041 3042 // There are two cases where we need to ensure (at least) the last iteration 3043 // runs in the scalar remainder loop. Thus, if the step evenly divides 3044 // the trip count, we set the remainder to be equal to the step. If the step 3045 // does not evenly divide the trip count, no adjustment is necessary since 3046 // there will already be scalar iterations. Note that the minimum iterations 3047 // check ensures that N >= Step. The cases are: 3048 // 1) If there is a non-reversed interleaved group that may speculatively 3049 // access memory out-of-bounds. 3050 // 2) If any instruction may follow a conditionally taken exit. That is, if 3051 // the loop contains multiple exiting blocks, or a single exiting block 3052 // which is not the latch. 3053 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 3054 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3055 R = Builder.CreateSelect(IsZero, Step, R); 3056 } 3057 3058 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3059 3060 return VectorTripCount; 3061 } 3062 3063 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3064 const DataLayout &DL) { 3065 // Verify that V is a vector type with same number of elements as DstVTy. 3066 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3067 unsigned VF = DstFVTy->getNumElements(); 3068 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3069 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3070 Type *SrcElemTy = SrcVecTy->getElementType(); 3071 Type *DstElemTy = DstFVTy->getElementType(); 3072 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3073 "Vector elements must have same size"); 3074 3075 // Do a direct cast if element types are castable. 3076 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3077 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3078 } 3079 // V cannot be directly casted to desired vector type. 3080 // May happen when V is a floating point vector but DstVTy is a vector of 3081 // pointers or vice-versa. Handle this using a two-step bitcast using an 3082 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3083 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3084 "Only one type should be a pointer type"); 3085 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3086 "Only one type should be a floating point type"); 3087 Type *IntTy = 3088 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3089 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3090 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3091 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3092 } 3093 3094 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3095 BasicBlock *Bypass) { 3096 Value *Count = getOrCreateTripCount(L); 3097 // Reuse existing vector loop preheader for TC checks. 3098 // Note that new preheader block is generated for vector loop. 3099 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3100 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3101 3102 // Generate code to check if the loop's trip count is less than VF * UF, or 3103 // equal to it in case a scalar epilogue is required; this implies that the 3104 // vector trip count is zero. 
This check also covers the case where adding one 3105 // to the backedge-taken count overflowed leading to an incorrect trip count 3106 // of zero. In this case we will also jump to the scalar loop. 3107 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3108 : ICmpInst::ICMP_ULT; 3109 3110 // If tail is to be folded, vector loop takes care of all iterations. 3111 Value *CheckMinIters = Builder.getFalse(); 3112 if (!Cost->foldTailByMasking()) { 3113 Value *Step = 3114 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3115 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3116 } 3117 // Create new preheader for vector loop. 3118 LoopVectorPreHeader = 3119 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3120 "vector.ph"); 3121 3122 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3123 DT->getNode(Bypass)->getIDom()) && 3124 "TC check is expected to dominate Bypass"); 3125 3126 // Update dominator for Bypass & LoopExit. 3127 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3128 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3129 3130 ReplaceInstWithInst( 3131 TCCheckBlock->getTerminator(), 3132 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3133 LoopBypassBlocks.push_back(TCCheckBlock); 3134 } 3135 3136 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3137 // Reuse existing vector loop preheader for SCEV checks. 3138 // Note that new preheader block is generated for vector loop. 3139 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader; 3140 3141 // Generate the code to check that the SCEV assumptions that we made. 3142 // We want the new basic block to start at the first instruction in a 3143 // sequence of instructions that form a check. 3144 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3145 "scev.check"); 3146 Value *SCEVCheck = Exp.expandCodeForPredicate( 3147 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 3148 3149 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3150 if (C->isZero()) 3151 return; 3152 3153 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3154 (OptForSizeBasedOnProfile && 3155 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3156 "Cannot SCEV check stride or overflow when optimizing for size"); 3157 3158 SCEVCheckBlock->setName("vector.scevcheck"); 3159 // Create new preheader for vector loop. 3160 LoopVectorPreHeader = 3161 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 3162 nullptr, "vector.ph"); 3163 3164 // Update dominator only if this is first RT check. 3165 if (LoopBypassBlocks.empty()) { 3166 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3167 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3168 } 3169 3170 ReplaceInstWithInst( 3171 SCEVCheckBlock->getTerminator(), 3172 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 3173 LoopBypassBlocks.push_back(SCEVCheckBlock); 3174 AddedSafetyChecks = true; 3175 } 3176 3177 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3178 // VPlan-native path does not do any analysis for runtime checks currently. 3179 if (EnableVPlanNativePath) 3180 return; 3181 3182 // Reuse existing vector loop preheader for runtime memory checks. 3183 // Note that new preheader block is generated for vector loop. 3184 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 3185 3186 // Generate the code that checks in runtime if arrays overlap. 
We put the 3187 // checks into a separate block to make the more common case of few elements 3188 // faster. 3189 auto *LAI = Legal->getLAI(); 3190 const auto &RtPtrChecking = *LAI->getRuntimePointerChecking(); 3191 if (!RtPtrChecking.Need) 3192 return; 3193 3194 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3195 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3196 "Cannot emit memory checks when optimizing for size, unless forced " 3197 "to vectorize."); 3198 ORE->emit([&]() { 3199 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3200 L->getStartLoc(), L->getHeader()) 3201 << "Code-size may be reduced by not forcing " 3202 "vectorization, or by source-code modifications " 3203 "eliminating the need for runtime checks " 3204 "(e.g., adding 'restrict')."; 3205 }); 3206 } 3207 3208 MemCheckBlock->setName("vector.memcheck"); 3209 // Create new preheader for vector loop. 3210 LoopVectorPreHeader = 3211 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 3212 "vector.ph"); 3213 3214 auto *CondBranch = cast<BranchInst>( 3215 Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader)); 3216 ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch); 3217 LoopBypassBlocks.push_back(MemCheckBlock); 3218 AddedSafetyChecks = true; 3219 3220 // Update dominator only if this is first RT check. 3221 if (LoopBypassBlocks.empty()) { 3222 DT->changeImmediateDominator(Bypass, MemCheckBlock); 3223 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 3224 } 3225 3226 Instruction *FirstCheckInst; 3227 Instruction *MemRuntimeCheck; 3228 std::tie(FirstCheckInst, MemRuntimeCheck) = 3229 addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop, 3230 RtPtrChecking.getChecks(), RtPtrChecking.getSE()); 3231 assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking " 3232 "claimed checks are required"); 3233 CondBranch->setCondition(MemRuntimeCheck); 3234 3235 // We currently don't use LoopVersioning for the actual loop cloning but we 3236 // still use it to add the noalias metadata. 3237 LVer = std::make_unique<LoopVersioning>( 3238 *Legal->getLAI(), 3239 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3240 DT, PSE.getSE()); 3241 LVer->prepareNoAliasMetadata(); 3242 } 3243 3244 Value *InnerLoopVectorizer::emitTransformedIndex( 3245 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3246 const InductionDescriptor &ID) const { 3247 3248 SCEVExpander Exp(*SE, DL, "induction"); 3249 auto Step = ID.getStep(); 3250 auto StartValue = ID.getStartValue(); 3251 assert(Index->getType() == Step->getType() && 3252 "Index type does not match StepValue type"); 3253 3254 // Note: the IR at this point is broken. We cannot use SE to create any new 3255 // SCEV and then expand it, hoping that SCEV's simplification will give us 3256 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3257 // lead to various SCEV crashes. So all we can do is to use builder and rely 3258 // on InstCombine for future simplifications. Here we handle some trivial 3259 // cases only. 
3260 auto CreateAdd = [&B](Value *X, Value *Y) { 3261 assert(X->getType() == Y->getType() && "Types don't match!"); 3262 if (auto *CX = dyn_cast<ConstantInt>(X)) 3263 if (CX->isZero()) 3264 return Y; 3265 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3266 if (CY->isZero()) 3267 return X; 3268 return B.CreateAdd(X, Y); 3269 }; 3270 3271 auto CreateMul = [&B](Value *X, Value *Y) { 3272 assert(X->getType() == Y->getType() && "Types don't match!"); 3273 if (auto *CX = dyn_cast<ConstantInt>(X)) 3274 if (CX->isOne()) 3275 return Y; 3276 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3277 if (CY->isOne()) 3278 return X; 3279 return B.CreateMul(X, Y); 3280 }; 3281 3282 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3283 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3284 // the DomTree is not kept up-to-date for additional blocks generated in the 3285 // vector loop. By using the header as insertion point, we guarantee that the 3286 // expanded instructions dominate all their uses. 3287 auto GetInsertPoint = [this, &B]() { 3288 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3289 if (InsertBB != LoopVectorBody && 3290 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3291 return LoopVectorBody->getTerminator(); 3292 return &*B.GetInsertPoint(); 3293 }; 3294 switch (ID.getKind()) { 3295 case InductionDescriptor::IK_IntInduction: { 3296 assert(Index->getType() == StartValue->getType() && 3297 "Index type does not match StartValue type"); 3298 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3299 return B.CreateSub(StartValue, Index); 3300 auto *Offset = CreateMul( 3301 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3302 return CreateAdd(StartValue, Offset); 3303 } 3304 case InductionDescriptor::IK_PtrInduction: { 3305 assert(isa<SCEVConstant>(Step) && 3306 "Expected constant step for pointer induction"); 3307 return B.CreateGEP( 3308 StartValue->getType()->getPointerElementType(), StartValue, 3309 CreateMul(Index, 3310 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()))); 3311 } 3312 case InductionDescriptor::IK_FpInduction: { 3313 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3314 auto InductionBinOp = ID.getInductionBinOp(); 3315 assert(InductionBinOp && 3316 (InductionBinOp->getOpcode() == Instruction::FAdd || 3317 InductionBinOp->getOpcode() == Instruction::FSub) && 3318 "Original bin op should be defined for FP induction"); 3319 3320 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3321 3322 // Floating point operations had to be 'fast' to enable the induction. 3323 FastMathFlags Flags; 3324 Flags.setFast(); 3325 3326 Value *MulExp = B.CreateFMul(StepValue, Index); 3327 if (isa<Instruction>(MulExp)) 3328 // We have to check, the MulExp may be a constant. 
3329 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 3330 3331 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3332 "induction"); 3333 if (isa<Instruction>(BOp)) 3334 cast<Instruction>(BOp)->setFastMathFlags(Flags); 3335 3336 return BOp; 3337 } 3338 case InductionDescriptor::IK_NoInduction: 3339 return nullptr; 3340 } 3341 llvm_unreachable("invalid enum"); 3342 } 3343 3344 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3345 LoopScalarBody = OrigLoop->getHeader(); 3346 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3347 LoopExitBlock = OrigLoop->getUniqueExitBlock(); 3348 assert(LoopExitBlock && "Must have an exit block"); 3349 assert(LoopVectorPreHeader && "Invalid loop structure"); 3350 3351 LoopMiddleBlock = 3352 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3353 LI, nullptr, Twine(Prefix) + "middle.block"); 3354 LoopScalarPreHeader = 3355 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3356 nullptr, Twine(Prefix) + "scalar.ph"); 3357 3358 // Set up branch from middle block to the exit and scalar preheader blocks. 3359 // completeLoopSkeleton will update the condition to use an iteration check, 3360 // if required to decide whether to execute the remainder. 3361 BranchInst *BrInst = 3362 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue()); 3363 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3364 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3365 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3366 3367 // We intentionally don't let SplitBlock to update LoopInfo since 3368 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3369 // LoopVectorBody is explicitly added to the correct place few lines later. 3370 LoopVectorBody = 3371 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3372 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3373 3374 // Update dominator for loop exit. 3375 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3376 3377 // Create and register the new vector loop. 3378 Loop *Lp = LI->AllocateLoop(); 3379 Loop *ParentLoop = OrigLoop->getParentLoop(); 3380 3381 // Insert the new loop into the loop nest and register the new basic blocks 3382 // before calling any utilities such as SCEV that require valid LoopInfo. 3383 if (ParentLoop) { 3384 ParentLoop->addChildLoop(Lp); 3385 } else { 3386 LI->addTopLevelLoop(Lp); 3387 } 3388 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3389 return Lp; 3390 } 3391 3392 void InnerLoopVectorizer::createInductionResumeValues( 3393 Loop *L, Value *VectorTripCount, 3394 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3395 assert(VectorTripCount && L && "Expected valid arguments"); 3396 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3397 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3398 "Inconsistent information about additional bypass."); 3399 // We are going to resume the execution of the scalar loop. 3400 // Go over all of the induction variables that we found and fix the 3401 // PHIs that are left in the scalar version of the loop. 3402 // The starting values of PHI nodes depend on the counter of the last 3403 // iteration in the vectorized loop. 3404 // If we come from a bypass edge then we need to start from the original 3405 // start value. 
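  // E.g. (illustrative): for a loop counting i = 0 .. N-1 vectorized with
  // n.vec = 16, the bc.resume.val phi created below feeds the scalar i-phi
  // with 16 when entered from the middle block, and with the original start
  // value 0 when entered from a bypass (runtime-check) block.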
3406 for (auto &InductionEntry : Legal->getInductionVars()) { 3407 PHINode *OrigPhi = InductionEntry.first; 3408 InductionDescriptor II = InductionEntry.second; 3409 3410 // Create phi nodes to merge from the backedge-taken check block. 3411 PHINode *BCResumeVal = 3412 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3413 LoopScalarPreHeader->getTerminator()); 3414 // Copy original phi DL over to the new one. 3415 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3416 Value *&EndValue = IVEndValues[OrigPhi]; 3417 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3418 if (OrigPhi == OldInduction) { 3419 // We know what the end value is. 3420 EndValue = VectorTripCount; 3421 } else { 3422 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3423 Type *StepType = II.getStep()->getType(); 3424 Instruction::CastOps CastOp = 3425 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3426 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3427 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3428 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3429 EndValue->setName("ind.end"); 3430 3431 // Compute the end value for the additional bypass (if applicable). 3432 if (AdditionalBypass.first) { 3433 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3434 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3435 StepType, true); 3436 CRD = 3437 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3438 EndValueFromAdditionalBypass = 3439 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3440 EndValueFromAdditionalBypass->setName("ind.end"); 3441 } 3442 } 3443 // The new PHI merges the original incoming value, in case of a bypass, 3444 // or the value at the end of the vectorized loop. 3445 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3446 3447 // Fix the scalar body counter (PHI node). 3448 // The old induction's phi node in the scalar body needs the truncated 3449 // value. 3450 for (BasicBlock *BB : LoopBypassBlocks) 3451 BCResumeVal->addIncoming(II.getStartValue(), BB); 3452 3453 if (AdditionalBypass.first) 3454 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3455 EndValueFromAdditionalBypass); 3456 3457 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3458 } 3459 } 3460 3461 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3462 MDNode *OrigLoopID) { 3463 assert(L && "Expected valid loop."); 3464 3465 // The trip counts should be cached by now. 3466 Value *Count = getOrCreateTripCount(L); 3467 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3468 3469 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3470 3471 // Add a check in the middle block to see if we have completed 3472 // all of the iterations in the first vector loop. 3473 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3474 // If tail is to be folded, we know we don't need to run the remainder. 3475 if (!Cost->foldTailByMasking()) { 3476 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3477 Count, VectorTripCount, "cmp.n", 3478 LoopMiddleBlock->getTerminator()); 3479 3480 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3481 // of the corresponding compare because they may have ended up with 3482 // different line numbers and we want to avoid awkward line stepping while 3483 // debugging. Eg. if the compare has got a line number inside the loop. 
3484 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3485 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3486 } 3487 3488 // Get ready to start creating new instructions into the vectorized body. 3489 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3490 "Inconsistent vector loop preheader"); 3491 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3492 3493 Optional<MDNode *> VectorizedLoopID = 3494 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3495 LLVMLoopVectorizeFollowupVectorized}); 3496 if (VectorizedLoopID.hasValue()) { 3497 L->setLoopID(VectorizedLoopID.getValue()); 3498 3499 // Do not setAlreadyVectorized if loop attributes have been defined 3500 // explicitly. 3501 return LoopVectorPreHeader; 3502 } 3503 3504 // Keep all loop hints from the original loop on the vector loop (we'll 3505 // replace the vectorizer-specific hints below). 3506 if (MDNode *LID = OrigLoop->getLoopID()) 3507 L->setLoopID(LID); 3508 3509 LoopVectorizeHints Hints(L, true, *ORE); 3510 Hints.setAlreadyVectorized(); 3511 3512 #ifdef EXPENSIVE_CHECKS 3513 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3514 LI->verify(*DT); 3515 #endif 3516 3517 return LoopVectorPreHeader; 3518 } 3519 3520 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3521 /* 3522 In this function we generate a new loop. The new loop will contain 3523 the vectorized instructions while the old loop will continue to run the 3524 scalar remainder. 3525 3526 [ ] <-- loop iteration number check. 3527 / | 3528 / v 3529 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3530 | / | 3531 | / v 3532 || [ ] <-- vector pre header. 3533 |/ | 3534 | v 3535 | [ ] \ 3536 | [ ]_| <-- vector loop. 3537 | | 3538 | v 3539 | -[ ] <--- middle-block. 3540 | / | 3541 | / v 3542 -|- >[ ] <--- new preheader. 3543 | | 3544 | v 3545 | [ ] \ 3546 | [ ]_| <-- old scalar loop to handle remainder. 3547 \ | 3548 \ v 3549 >[ ] <-- exit block. 3550 ... 3551 */ 3552 3553 // Get the metadata of the original loop before it gets modified. 3554 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3555 3556 // Create an empty vector loop, and prepare basic blocks for the runtime 3557 // checks. 3558 Loop *Lp = createVectorLoopSkeleton(""); 3559 3560 // Now, compare the new count to zero. If it is zero skip the vector loop and 3561 // jump to the scalar loop. This check also covers the case where the 3562 // backedge-taken count is uint##_max: adding one to it will overflow leading 3563 // to an incorrect trip count of zero. In this (rare) case we will also jump 3564 // to the scalar loop. 3565 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3566 3567 // Generate the code to check any assumptions that we've made for SCEV 3568 // expressions. 3569 emitSCEVChecks(Lp, LoopScalarPreHeader); 3570 3571 // Generate the code that checks in runtime if arrays overlap. We put the 3572 // checks into a separate block to make the more common case of few elements 3573 // faster. 3574 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3575 3576 // Some loops have a single integer induction variable, while other loops 3577 // don't. One example is c++ iterators that often have multiple pointer 3578 // induction variables. In the code below we also support a case where we 3579 // don't have a single induction variable. 3580 // 3581 // We try to obtain an induction variable from the original loop as hard 3582 // as possible. 
However if we don't find one that: 3583 // - is an integer 3584 // - counts from zero, stepping by one 3585 // - is the size of the widest induction variable type 3586 // then we create a new one. 3587 OldInduction = Legal->getPrimaryInduction(); 3588 Type *IdxTy = Legal->getWidestInductionType(); 3589 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3590 // The loop step is equal to the vectorization factor (num of SIMD elements) 3591 // times the unroll factor (num of SIMD instructions). 3592 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3593 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3594 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3595 Induction = 3596 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3597 getDebugLocFromInstOrOperands(OldInduction)); 3598 3599 // Emit phis for the new starting index of the scalar loop. 3600 createInductionResumeValues(Lp, CountRoundDown); 3601 3602 return completeLoopSkeleton(Lp, OrigLoopID); 3603 } 3604 3605 // Fix up external users of the induction variable. At this point, we are 3606 // in LCSSA form, with all external PHIs that use the IV having one input value, 3607 // coming from the remainder loop. We need those PHIs to also have a correct 3608 // value for the IV when arriving directly from the middle block. 3609 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3610 const InductionDescriptor &II, 3611 Value *CountRoundDown, Value *EndValue, 3612 BasicBlock *MiddleBlock) { 3613 // There are two kinds of external IV usages - those that use the value 3614 // computed in the last iteration (the PHI) and those that use the penultimate 3615 // value (the value that feeds into the phi from the loop latch). 3616 // We allow both, but they, obviously, have different values. 3617 3618 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3619 3620 DenseMap<Value *, Value *> MissingVals; 3621 3622 // An external user of the last iteration's value should see the value that 3623 // the remainder loop uses to initialize its own IV. 3624 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3625 for (User *U : PostInc->users()) { 3626 Instruction *UI = cast<Instruction>(U); 3627 if (!OrigLoop->contains(UI)) { 3628 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3629 MissingVals[UI] = EndValue; 3630 } 3631 } 3632 3633 // An external user of the penultimate value need to see EndValue - Step. 3634 // The simplest way to get this is to recompute it from the constituent SCEVs, 3635 // that is Start + (Step * (CRD - 1)). 3636 for (User *U : OrigPhi->users()) { 3637 auto *UI = cast<Instruction>(U); 3638 if (!OrigLoop->contains(UI)) { 3639 const DataLayout &DL = 3640 OrigLoop->getHeader()->getModule()->getDataLayout(); 3641 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3642 3643 IRBuilder<> B(MiddleBlock->getTerminator()); 3644 Value *CountMinusOne = B.CreateSub( 3645 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3646 Value *CMO = 3647 !II.getStep()->getType()->isIntegerTy() 3648 ? 
B.CreateCast(Instruction::SIToFP, CountMinusOne, 3649 II.getStep()->getType()) 3650 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3651 CMO->setName("cast.cmo"); 3652 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3653 Escape->setName("ind.escape"); 3654 MissingVals[UI] = Escape; 3655 } 3656 } 3657 3658 for (auto &I : MissingVals) { 3659 PHINode *PHI = cast<PHINode>(I.first); 3660 // One corner case we have to handle is two IVs "chasing" each-other, 3661 // that is %IV2 = phi [...], [ %IV1, %latch ] 3662 // In this case, if IV1 has an external use, we need to avoid adding both 3663 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3664 // don't already have an incoming value for the middle block. 3665 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3666 PHI->addIncoming(I.second, MiddleBlock); 3667 } 3668 } 3669 3670 namespace { 3671 3672 struct CSEDenseMapInfo { 3673 static bool canHandle(const Instruction *I) { 3674 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3675 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3676 } 3677 3678 static inline Instruction *getEmptyKey() { 3679 return DenseMapInfo<Instruction *>::getEmptyKey(); 3680 } 3681 3682 static inline Instruction *getTombstoneKey() { 3683 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3684 } 3685 3686 static unsigned getHashValue(const Instruction *I) { 3687 assert(canHandle(I) && "Unknown instruction!"); 3688 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3689 I->value_op_end())); 3690 } 3691 3692 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3693 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3694 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3695 return LHS == RHS; 3696 return LHS->isIdenticalTo(RHS); 3697 } 3698 }; 3699 3700 } // end anonymous namespace 3701 3702 ///Perform cse of induction variable instructions. 3703 static void cse(BasicBlock *BB) { 3704 // Perform simple cse. 3705 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3706 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3707 Instruction *In = &*I++; 3708 3709 if (!CSEDenseMapInfo::canHandle(In)) 3710 continue; 3711 3712 // Check if we can replace this instruction with any of the 3713 // visited instructions. 3714 if (Instruction *V = CSEMap.lookup(In)) { 3715 In->replaceAllUsesWith(V); 3716 In->eraseFromParent(); 3717 continue; 3718 } 3719 3720 CSEMap[In] = In; 3721 } 3722 } 3723 3724 InstructionCost 3725 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3726 bool &NeedToScalarize) { 3727 assert(!VF.isScalable() && "scalable vectors not yet supported."); 3728 Function *F = CI->getCalledFunction(); 3729 Type *ScalarRetTy = CI->getType(); 3730 SmallVector<Type *, 4> Tys, ScalarTys; 3731 for (auto &ArgOp : CI->arg_operands()) 3732 ScalarTys.push_back(ArgOp->getType()); 3733 3734 // Estimate cost of scalarized vector call. The source operands are assumed 3735 // to be vectors, so we need to extract individual elements from there, 3736 // execute VF scalar calls, and then gather the result into the vector return 3737 // value. 3738 InstructionCost ScalarCallCost = 3739 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3740 if (VF.isScalar()) 3741 return ScalarCallCost; 3742 3743 // Compute corresponding vector type for return value and arguments. 
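  // Illustrative sketch of the comparison performed below (assumed costs):
  // with VF = 4, a scalar call cost of 10 and a scalarization overhead of 6,
  // the scalarized estimate is 4 * 10 + 6 = 46; a vector library variant
  // costing 20 would be cheaper, so NeedToScalarize is cleared and 20 is
  // returned.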
3744 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3745 for (Type *ScalarTy : ScalarTys) 3746 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3747 3748 // Compute costs of unpacking argument values for the scalar calls and 3749 // packing the return values to a vector. 3750 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3751 3752 InstructionCost Cost = 3753 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3754 3755 // If we can't emit a vector call for this function, then the currently found 3756 // cost is the cost we need to return. 3757 NeedToScalarize = true; 3758 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3759 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3760 3761 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3762 return Cost; 3763 3764 // If the corresponding vector cost is cheaper, return its cost. 3765 InstructionCost VectorCallCost = 3766 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3767 if (VectorCallCost < Cost) { 3768 NeedToScalarize = false; 3769 Cost = VectorCallCost; 3770 } 3771 return Cost; 3772 } 3773 3774 InstructionCost 3775 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3776 ElementCount VF) { 3777 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3778 assert(ID && "Expected intrinsic call!"); 3779 3780 IntrinsicCostAttributes CostAttrs(ID, *CI, VF); 3781 return TTI.getIntrinsicInstrCost(CostAttrs, 3782 TargetTransformInfo::TCK_RecipThroughput); 3783 } 3784 3785 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3786 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3787 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3788 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3789 } 3790 3791 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3792 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3793 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3794 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3795 } 3796 3797 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3798 // For every instruction `I` in MinBWs, truncate the operands, create a 3799 // truncated version of `I` and reextend its result. InstCombine runs 3800 // later and will remove any ext/trunc pairs. 3801 SmallPtrSet<Value *, 4> Erased; 3802 for (const auto &KV : Cost->getMinimalBitwidths()) { 3803 // If the value wasn't vectorized, we must maintain the original scalar 3804 // type. The absence of the value from VectorLoopValueMap indicates that it 3805 // wasn't vectorized. 
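    // E.g. (illustrative): if MinBWs records that an i32 add only needs 8
    // bits, the code below shrinks its operands to <VF x i8> (reusing a
    // zext's source where possible), recreates the add in i8, and
    // zero-extends the result back to <VF x i32>; InstCombine is expected to
    // remove redundant ext/trunc pairs later.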
3806 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3807 continue; 3808 for (unsigned Part = 0; Part < UF; ++Part) { 3809 Value *I = getOrCreateVectorValue(KV.first, Part); 3810 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3811 continue; 3812 Type *OriginalTy = I->getType(); 3813 Type *ScalarTruncatedTy = 3814 IntegerType::get(OriginalTy->getContext(), KV.second); 3815 auto *TruncatedTy = FixedVectorType::get( 3816 ScalarTruncatedTy, 3817 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3818 if (TruncatedTy == OriginalTy) 3819 continue; 3820 3821 IRBuilder<> B(cast<Instruction>(I)); 3822 auto ShrinkOperand = [&](Value *V) -> Value * { 3823 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3824 if (ZI->getSrcTy() == TruncatedTy) 3825 return ZI->getOperand(0); 3826 return B.CreateZExtOrTrunc(V, TruncatedTy); 3827 }; 3828 3829 // The actual instruction modification depends on the instruction type, 3830 // unfortunately. 3831 Value *NewI = nullptr; 3832 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3833 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3834 ShrinkOperand(BO->getOperand(1))); 3835 3836 // Any wrapping introduced by shrinking this operation shouldn't be 3837 // considered undefined behavior. So, we can't unconditionally copy 3838 // arithmetic wrapping flags to NewI. 3839 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3840 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3841 NewI = 3842 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3843 ShrinkOperand(CI->getOperand(1))); 3844 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3845 NewI = B.CreateSelect(SI->getCondition(), 3846 ShrinkOperand(SI->getTrueValue()), 3847 ShrinkOperand(SI->getFalseValue())); 3848 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3849 switch (CI->getOpcode()) { 3850 default: 3851 llvm_unreachable("Unhandled cast!"); 3852 case Instruction::Trunc: 3853 NewI = ShrinkOperand(CI->getOperand(0)); 3854 break; 3855 case Instruction::SExt: 3856 NewI = B.CreateSExtOrTrunc( 3857 CI->getOperand(0), 3858 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3859 break; 3860 case Instruction::ZExt: 3861 NewI = B.CreateZExtOrTrunc( 3862 CI->getOperand(0), 3863 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3864 break; 3865 } 3866 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3867 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3868 ->getNumElements(); 3869 auto *O0 = B.CreateZExtOrTrunc( 3870 SI->getOperand(0), 3871 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3872 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3873 ->getNumElements(); 3874 auto *O1 = B.CreateZExtOrTrunc( 3875 SI->getOperand(1), 3876 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3877 3878 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3879 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3880 // Don't do anything with the operands, just extend the result. 
3881 continue; 3882 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3883 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3884 ->getNumElements(); 3885 auto *O0 = B.CreateZExtOrTrunc( 3886 IE->getOperand(0), 3887 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3888 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3889 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3890 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3891 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3892 ->getNumElements(); 3893 auto *O0 = B.CreateZExtOrTrunc( 3894 EE->getOperand(0), 3895 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3896 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3897 } else { 3898 // If we don't know what to do, be conservative and don't do anything. 3899 continue; 3900 } 3901 3902 // Lastly, extend the result. 3903 NewI->takeName(cast<Instruction>(I)); 3904 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3905 I->replaceAllUsesWith(Res); 3906 cast<Instruction>(I)->eraseFromParent(); 3907 Erased.insert(I); 3908 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3909 } 3910 } 3911 3912 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3913 for (const auto &KV : Cost->getMinimalBitwidths()) { 3914 // If the value wasn't vectorized, we must maintain the original scalar 3915 // type. The absence of the value from VectorLoopValueMap indicates that it 3916 // wasn't vectorized. 3917 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3918 continue; 3919 for (unsigned Part = 0; Part < UF; ++Part) { 3920 Value *I = getOrCreateVectorValue(KV.first, Part); 3921 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3922 if (Inst && Inst->use_empty()) { 3923 Value *NewI = Inst->getOperand(0); 3924 Inst->eraseFromParent(); 3925 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3926 } 3927 } 3928 } 3929 } 3930 3931 void InnerLoopVectorizer::fixVectorizedLoop() { 3932 // Insert truncates and extends for any truncated instructions as hints to 3933 // InstCombine. 3934 if (VF.isVector()) 3935 truncateToMinimalBitwidths(); 3936 3937 // Fix widened non-induction PHIs by setting up the PHI operands. 3938 if (OrigPHIsToFix.size()) { 3939 assert(EnableVPlanNativePath && 3940 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3941 fixNonInductionPHIs(); 3942 } 3943 3944 // At this point every instruction in the original loop is widened to a 3945 // vector form. Now we need to fix the recurrences in the loop. These PHI 3946 // nodes are currently empty because we did not want to introduce cycles. 3947 // This is the second stage of vectorizing recurrences. 3948 fixCrossIterationPHIs(); 3949 3950 // Forget the original basic block. 3951 PSE.getSE()->forgetLoop(OrigLoop); 3952 3953 // Fix-up external users of the induction variables. 3954 for (auto &Entry : Legal->getInductionVars()) 3955 fixupIVUsers(Entry.first, Entry.second, 3956 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3957 IVEndValues[Entry.first], LoopMiddleBlock); 3958 3959 fixLCSSAPHIs(); 3960 for (Instruction *PI : PredicatedInstructions) 3961 sinkScalarOperands(&*PI); 3962 3963 // Remove redundant induction instructions. 3964 cse(LoopVectorBody); 3965 3966 // Set/update profile weights for the vector and remainder loops as original 3967 // loop iterations are now distributed among them. Note that original loop 3968 // represented by LoopScalarBody becomes remainder loop after vectorization. 
3969   //
3970   // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3971   // end up with a slightly roughened result, but that should be OK since the
3972   // profile is not inherently precise anyway. Note also that a possible
3973   // bypass of the vector code caused by legality checks is ignored,
3974   // optimistically assigning all the weight to the vector loop.
3975   //
3976   // For scalable vectorization we can't know at compile time how many
3977   // iterations of the loop are handled in one vector iteration, so instead
3978   // assume a pessimistic vscale of '1'.
3979   setProfileInfoAfterUnrolling(
3980       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
3981       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
3982 }
3983
3984 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3985   // In order to support recurrences we need to be able to vectorize Phi nodes.
3986   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3987   // stage #2: We now need to fix the recurrences by adding incoming edges to
3988   // the currently empty PHI nodes. At this point every instruction in the
3989   // original loop is widened to a vector form so we can use them to construct
3990   // the incoming edges.
3991   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3992     // Handle first-order recurrences and reductions that need to be fixed.
3993     if (Legal->isFirstOrderRecurrence(&Phi))
3994       fixFirstOrderRecurrence(&Phi);
3995     else if (Legal->isReductionVariable(&Phi))
3996       fixReduction(&Phi);
3997   }
3998 }
3999
4000 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4001   // This is the second phase of vectorizing first-order recurrences. An
4002   // overview of the transformation is described below. Suppose we have the
4003   // following loop.
4004   //
4005   //   for (int i = 0; i < n; ++i)
4006   //     b[i] = a[i] - a[i - 1];
4007   //
4008   // There is a first-order recurrence on "a". For this loop, the shorthand
4009   // scalar IR looks like:
4010   //
4011   //   scalar.ph:
4012   //     s_init = a[-1]
4013   //     br scalar.body
4014   //
4015   //   scalar.body:
4016   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4017   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4018   //     s2 = a[i]
4019   //     b[i] = s2 - s1
4020   //     br cond, scalar.body, ...
4021   //
4022   // In this example, s1 is a recurrence because its value depends on the
4023   // previous iteration. In the first phase of vectorization, we created a
4024   // temporary value for s1. We now complete the vectorization and produce the
4025   // shorthand vector IR shown below (for VF = 4, UF = 1).
4026   //
4027   //   vector.ph:
4028   //     v_init = vector(..., ..., ..., a[-1])
4029   //     br vector.body
4030   //
4031   //   vector.body
4032   //     i = phi [0, vector.ph], [i+4, vector.body]
4033   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4034   //     v2 = a[i, i+1, i+2, i+3];
4035   //     v3 = vector(v1(3), v2(0, 1, 2))
4036   //     b[i, i+1, i+2, i+3] = v2 - v3
4037   //     br cond, vector.body, middle.block
4038   //
4039   //   middle.block:
4040   //     x = v2(3)
4041   //     br scalar.ph
4042   //
4043   //   scalar.ph:
4044   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4045   //     br scalar.body
4046   //
4047   // After the vector loop finishes executing, we extract the next value of
4048   // the recurrence (x) to use as the initial value in the scalar loop.
4049
4050   // Get the original loop preheader and single loop latch.
4051 auto *Preheader = OrigLoop->getLoopPreheader(); 4052 auto *Latch = OrigLoop->getLoopLatch(); 4053 4054 // Get the initial and previous values of the scalar recurrence. 4055 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4056 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4057 4058 // Create a vector from the initial value. 4059 auto *VectorInit = ScalarInit; 4060 if (VF.isVector()) { 4061 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4062 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 4063 VectorInit = Builder.CreateInsertElement( 4064 PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 4065 Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init"); 4066 } 4067 4068 // We constructed a temporary phi node in the first phase of vectorization. 4069 // This phi node will eventually be deleted. 4070 Builder.SetInsertPoint( 4071 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 4072 4073 // Create a phi node for the new recurrence. The current value will either be 4074 // the initial value inserted into a vector or loop-varying vector value. 4075 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4076 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4077 4078 // Get the vectorized previous value of the last part UF - 1. It appears last 4079 // among all unrolled iterations, due to the order of their construction. 4080 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 4081 4082 // Find and set the insertion point after the previous value if it is an 4083 // instruction. 4084 BasicBlock::iterator InsertPt; 4085 // Note that the previous value may have been constant-folded so it is not 4086 // guaranteed to be an instruction in the vector loop. 4087 // FIXME: Loop invariant values do not form recurrences. We should deal with 4088 // them earlier. 4089 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 4090 InsertPt = LoopVectorBody->getFirstInsertionPt(); 4091 else { 4092 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 4093 if (isa<PHINode>(PreviousLastPart)) 4094 // If the previous value is a phi node, we should insert after all the phi 4095 // nodes in the block containing the PHI to avoid breaking basic block 4096 // verification. Note that the basic block may be different to 4097 // LoopVectorBody, in case we predicate the loop. 4098 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 4099 else 4100 InsertPt = ++PreviousInst->getIterator(); 4101 } 4102 Builder.SetInsertPoint(&*InsertPt); 4103 4104 // We will construct a vector for the recurrence by combining the values for 4105 // the current and previous iterations. This is the required shuffle mask. 4106 assert(!VF.isScalable()); 4107 SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue()); 4108 ShuffleMask[0] = VF.getKnownMinValue() - 1; 4109 for (unsigned I = 1; I < VF.getKnownMinValue(); ++I) 4110 ShuffleMask[I] = I + VF.getKnownMinValue() - 1; 4111 4112 // The vector from which to take the initial value for the current iteration 4113 // (actual or unrolled). Initially, this is the vector phi node. 4114 Value *Incoming = VecPhi; 4115 4116 // Shuffle the current and previous vector and update the vector parts. 4117 for (unsigned Part = 0; Part < UF; ++Part) { 4118 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 4119 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 4120 auto *Shuffle = 4121 VF.isVector() 4122 ? 
Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4123 : Incoming;
4124 PhiPart->replaceAllUsesWith(Shuffle);
4125 cast<Instruction>(PhiPart)->eraseFromParent();
4126 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
4127 Incoming = PreviousPart;
4128 }
4129
4130 // Fix the latch value of the new recurrence in the vector loop.
4131 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4132
4133 // Extract the last vector element in the middle block. This will be the
4134 // initial value for the recurrence when jumping to the scalar loop.
4135 auto *ExtractForScalar = Incoming;
4136 if (VF.isVector()) {
4137 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4138 ExtractForScalar = Builder.CreateExtractElement(
4139 ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4140 "vector.recur.extract");
4141 }
4142 // Extract the second-to-last element in the middle block if the
4143 // Phi is used outside the loop. We need to extract the phi itself
4144 // and not the last element (the phi update in the current iteration). This
4145 // will be the value when jumping to the exit block from the LoopMiddleBlock,
4146 // when the scalar loop is not run at all.
4147 Value *ExtractForPhiUsedOutsideLoop = nullptr;
4148 if (VF.isVector())
4149 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4150 Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4151 "vector.recur.extract.for.phi");
4152 // When the loop is unrolled without vectorizing, initialize
4153 // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
4154 // `Incoming`. This is analogous to the vectorized case above: extracting the
4155 // second-to-last element when VF > 1.
4156 else if (UF > 1)
4157 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4158
4159 // Fix the initial value of the original recurrence in the scalar loop.
4160 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4161 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4162 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4163 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4164 Start->addIncoming(Incoming, BB);
4165 }
4166
4167 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4168 Phi->setName("scalar.recur");
4169
4170 // Finally, fix users of the recurrence outside the loop. The users will need
4171 // either the last value of the scalar recurrence or the last value of the
4172 // vector recurrence we extracted in the middle block. Since the loop is in
4173 // LCSSA form, we just need to find all the phi nodes for the original scalar
4174 // recurrence in the exit block, and then add an edge for the middle block.
4175 // Note that LCSSA does not imply single entry when the original scalar loop
4176 // had multiple exiting edges (as we always run the last iteration in the
4177 // scalar epilogue); in that case, the exiting path through the middle block
4178 // will be dynamically dead and the value picked for the phi doesn't matter.
4179 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4180 if (any_of(LCSSAPhi.incoming_values(),
4181 [Phi](Value *V) { return V == Phi; }))
4182 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4183 }
4184
4185 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4186 // Get its reduction variable descriptor.
4187 assert(Legal->isReductionVariable(Phi) && 4188 "Unable to find the reduction variable"); 4189 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4190 4191 RecurKind RK = RdxDesc.getRecurrenceKind(); 4192 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4193 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4194 setDebugLocFromInst(Builder, ReductionStartValue); 4195 bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi); 4196 4197 // This is the vector-clone of the value that leaves the loop. 4198 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType(); 4199 4200 // Wrap flags are in general invalid after vectorization, clear them. 4201 clearReductionWrapFlags(RdxDesc); 4202 4203 // Fix the vector-loop phi. 4204 4205 // Reductions do not have to start at zero. They can start with 4206 // any loop invariant values. 4207 BasicBlock *Latch = OrigLoop->getLoopLatch(); 4208 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 4209 4210 for (unsigned Part = 0; Part < UF; ++Part) { 4211 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part); 4212 Value *Val = getOrCreateVectorValue(LoopVal, Part); 4213 cast<PHINode>(VecRdxPhi) 4214 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4215 } 4216 4217 // Before each round, move the insertion point right between 4218 // the PHIs and the values we are going to write. 4219 // This allows us to write both PHINodes and the extractelement 4220 // instructions. 4221 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4222 4223 setDebugLocFromInst(Builder, LoopExitInst); 4224 4225 // If tail is folded by masking, the vector value to leave the loop should be 4226 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4227 // instead of the former. For an inloop reduction the reduction will already 4228 // be predicated, and does not need to be handled here. 4229 if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { 4230 for (unsigned Part = 0; Part < UF; ++Part) { 4231 Value *VecLoopExitInst = 4232 VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4233 Value *Sel = nullptr; 4234 for (User *U : VecLoopExitInst->users()) { 4235 if (isa<SelectInst>(U)) { 4236 assert(!Sel && "Reduction exit feeding two selects"); 4237 Sel = U; 4238 } else 4239 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4240 } 4241 assert(Sel && "Reduction exit feeds no select"); 4242 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel); 4243 4244 // If the target can create a predicated operator for the reduction at no 4245 // extra cost in the loop (for example a predicated vadd), it can be 4246 // cheaper for the select to remain in the loop than be sunk out of it, 4247 // and so use the select value for the phi instead of the old 4248 // LoopExitValue. 4249 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4250 if (PreferPredicatedReductionSelect || 4251 TTI->preferPredicatedReductionSelect( 4252 RdxDesc.getOpcode(), Phi->getType(), 4253 TargetTransformInfo::ReductionFlags())) { 4254 auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part)); 4255 VecRdxPhi->setIncomingValueForBlock( 4256 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4257 } 4258 } 4259 } 4260 4261 // If the vector reduction can be performed in a smaller type, we truncate 4262 // then extend the loop exit value to enable InstCombine to evaluate the 4263 // entire expression in the smaller type. 
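// For example (illustrative): a sum accumulated in an i32 phi for which the
// cost model proved 8 bits suffice: each unrolled part is truncated to
// <4 x i8> and immediately re-extended for its existing in-loop users, and the
// parts are truncated once more in the middle block, so InstCombine can shrink
// the whole reduction chain to i8; the final reduced value is then sign- or
// zero-extended back to i32 below.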
4264 if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) { 4265 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4266 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4267 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4268 Builder.SetInsertPoint( 4269 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4270 VectorParts RdxParts(UF); 4271 for (unsigned Part = 0; Part < UF; ++Part) { 4272 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4273 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4274 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4275 : Builder.CreateZExt(Trunc, VecTy); 4276 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4277 UI != RdxParts[Part]->user_end();) 4278 if (*UI != Trunc) { 4279 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4280 RdxParts[Part] = Extnd; 4281 } else { 4282 ++UI; 4283 } 4284 } 4285 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4286 for (unsigned Part = 0; Part < UF; ++Part) { 4287 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4288 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 4289 } 4290 } 4291 4292 // Reduce all of the unrolled parts into a single vector. 4293 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 4294 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4295 4296 // The middle block terminator has already been assigned a DebugLoc here (the 4297 // OrigLoop's single latch terminator). We want the whole middle block to 4298 // appear to execute on this line because: (a) it is all compiler generated, 4299 // (b) these instructions are always executed after evaluating the latch 4300 // conditional branch, and (c) other passes may add new predecessors which 4301 // terminate on this line. This is the easiest way to ensure we don't 4302 // accidentally cause an extra step back into the loop while debugging. 4303 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4304 for (unsigned Part = 1; Part < UF; ++Part) { 4305 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4306 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4307 // Floating point operations had to be 'fast' to enable the reduction. 4308 ReducedPartRdx = addFastMathFlag( 4309 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 4310 ReducedPartRdx, "bin.rdx"), 4311 RdxDesc.getFastMathFlags()); 4312 else 4313 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4314 } 4315 4316 // Create the reduction after the loop. Note that inloop reductions create the 4317 // target reduction in the loop using a Reduction recipe. 4318 if (VF.isVector() && !IsInLoopReductionPhi) { 4319 ReducedPartRdx = 4320 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4321 // If the reduction can be performed in a smaller type, we need to extend 4322 // the reduction to the wider type before we branch to the original loop. 4323 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4324 ReducedPartRdx = 4325 RdxDesc.isSigned() 4326 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4327 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4328 } 4329 4330 // Create a phi node that merges control-flow from the backedge-taken check 4331 // block and the middle block. 
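// Shorthand of the values produced above and the phi created below
// (illustrative, for an integer add reduction with VF = 4 and UF = 2; the
// actual reduction call is whatever createTargetReduction emits):
//
//   middle.block:
//     bin.rdx = add <4 x i32> rdx.part1, rdx.part0
//     rdx.res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> bin.rdx)
//   scalar.ph:
//     bc.merge.rdx = phi i32 [ rdx.start, bypass blocks ], [ rdx.res, middle.block ]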
4332 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4333 LoopScalarPreHeader->getTerminator()); 4334 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4335 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4336 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4337 4338 // Now, we need to fix the users of the reduction variable 4339 // inside and outside of the scalar remainder loop. 4340 4341 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4342 // in the exit blocks. See comment on analogous loop in 4343 // fixFirstOrderRecurrence for a more complete explaination of the logic. 4344 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4345 if (any_of(LCSSAPhi.incoming_values(), 4346 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4347 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4348 4349 // Fix the scalar loop reduction variable with the incoming reduction sum 4350 // from the vector body and from the backedge value. 4351 int IncomingEdgeBlockIdx = 4352 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4353 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4354 // Pick the other block. 4355 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4356 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4357 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4358 } 4359 4360 void InnerLoopVectorizer::clearReductionWrapFlags( 4361 RecurrenceDescriptor &RdxDesc) { 4362 RecurKind RK = RdxDesc.getRecurrenceKind(); 4363 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4364 return; 4365 4366 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4367 assert(LoopExitInstr && "null loop exit instruction"); 4368 SmallVector<Instruction *, 8> Worklist; 4369 SmallPtrSet<Instruction *, 8> Visited; 4370 Worklist.push_back(LoopExitInstr); 4371 Visited.insert(LoopExitInstr); 4372 4373 while (!Worklist.empty()) { 4374 Instruction *Cur = Worklist.pop_back_val(); 4375 if (isa<OverflowingBinaryOperator>(Cur)) 4376 for (unsigned Part = 0; Part < UF; ++Part) { 4377 Value *V = getOrCreateVectorValue(Cur, Part); 4378 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4379 } 4380 4381 for (User *U : Cur->users()) { 4382 Instruction *UI = cast<Instruction>(U); 4383 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4384 Visited.insert(UI).second) 4385 Worklist.push_back(UI); 4386 } 4387 } 4388 } 4389 4390 void InnerLoopVectorizer::fixLCSSAPHIs() { 4391 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4392 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4393 // Some phis were already hand updated by the reduction and recurrence 4394 // code above, leave them alone. 4395 continue; 4396 4397 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4398 // Non-instruction incoming values will have only one value. 4399 unsigned LastLane = 0; 4400 if (isa<Instruction>(IncomingValue)) 4401 LastLane = Cost->isUniformAfterVectorization( 4402 cast<Instruction>(IncomingValue), VF) 4403 ? 0 4404 : VF.getKnownMinValue() - 1; 4405 assert((!VF.isScalable() || LastLane == 0) && 4406 "scalable vectors dont support non-uniform scalars yet"); 4407 // Can be a loop invariant incoming value or the last scalar value to be 4408 // extracted from the vectorized loop. 
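// For example (illustrative, VF = 4, UF = 2): a non-uniform incoming value is
// taken from {Part = 1, Lane = 3}, the very last scalar copy produced by the
// vector loop, while a uniform-after-vectorization value only needs lane 0 of
// the last part.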
4409 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4410 Value *lastIncomingValue = 4411 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 4412 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4413 } 4414 } 4415 4416 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4417 // The basic block and loop containing the predicated instruction. 4418 auto *PredBB = PredInst->getParent(); 4419 auto *VectorLoop = LI->getLoopFor(PredBB); 4420 4421 // Initialize a worklist with the operands of the predicated instruction. 4422 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4423 4424 // Holds instructions that we need to analyze again. An instruction may be 4425 // reanalyzed if we don't yet know if we can sink it or not. 4426 SmallVector<Instruction *, 8> InstsToReanalyze; 4427 4428 // Returns true if a given use occurs in the predicated block. Phi nodes use 4429 // their operands in their corresponding predecessor blocks. 4430 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4431 auto *I = cast<Instruction>(U.getUser()); 4432 BasicBlock *BB = I->getParent(); 4433 if (auto *Phi = dyn_cast<PHINode>(I)) 4434 BB = Phi->getIncomingBlock( 4435 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4436 return BB == PredBB; 4437 }; 4438 4439 // Iteratively sink the scalarized operands of the predicated instruction 4440 // into the block we created for it. When an instruction is sunk, it's 4441 // operands are then added to the worklist. The algorithm ends after one pass 4442 // through the worklist doesn't sink a single instruction. 4443 bool Changed; 4444 do { 4445 // Add the instructions that need to be reanalyzed to the worklist, and 4446 // reset the changed indicator. 4447 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4448 InstsToReanalyze.clear(); 4449 Changed = false; 4450 4451 while (!Worklist.empty()) { 4452 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4453 4454 // We can't sink an instruction if it is a phi node, is already in the 4455 // predicated block, is not in the loop, or may have side effects. 4456 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4457 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4458 continue; 4459 4460 // It's legal to sink the instruction if all its uses occur in the 4461 // predicated block. Otherwise, there's nothing to do yet, and we may 4462 // need to reanalyze the instruction. 4463 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4464 InstsToReanalyze.push_back(I); 4465 continue; 4466 } 4467 4468 // Move the instruction to the beginning of the predicated block, and add 4469 // it's operands to the worklist. 4470 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4471 Worklist.insert(I->op_begin(), I->op_end()); 4472 4473 // The sinking may have enabled other instructions to be sunk, so we will 4474 // need to iterate. 
4475 Changed = true; 4476 } 4477 } while (Changed); 4478 } 4479 4480 void InnerLoopVectorizer::fixNonInductionPHIs() { 4481 for (PHINode *OrigPhi : OrigPHIsToFix) { 4482 PHINode *NewPhi = 4483 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 4484 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4485 4486 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4487 predecessors(OrigPhi->getParent())); 4488 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4489 predecessors(NewPhi->getParent())); 4490 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4491 "Scalar and Vector BB should have the same number of predecessors"); 4492 4493 // The insertion point in Builder may be invalidated by the time we get 4494 // here. Force the Builder insertion point to something valid so that we do 4495 // not run into issues during insertion point restore in 4496 // getOrCreateVectorValue calls below. 4497 Builder.SetInsertPoint(NewPhi); 4498 4499 // The predecessor order is preserved and we can rely on mapping between 4500 // scalar and vector block predecessors. 4501 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4502 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4503 4504 // When looking up the new scalar/vector values to fix up, use incoming 4505 // values from original phi. 4506 Value *ScIncV = 4507 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4508 4509 // Scalar incoming value may need a broadcast 4510 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4511 NewPhi->addIncoming(NewIncV, NewPredBB); 4512 } 4513 } 4514 } 4515 4516 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4517 VPUser &Operands, unsigned UF, 4518 ElementCount VF, bool IsPtrLoopInvariant, 4519 SmallBitVector &IsIndexLoopInvariant, 4520 VPTransformState &State) { 4521 // Construct a vector GEP by widening the operands of the scalar GEP as 4522 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4523 // results in a vector of pointers when at least one operand of the GEP 4524 // is vector-typed. Thus, to keep the representation compact, we only use 4525 // vector-typed operands for loop-varying values. 4526 4527 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4528 // If we are vectorizing, but the GEP has only loop-invariant operands, 4529 // the GEP we build (by only using vector-typed operands for 4530 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4531 // produce a vector of pointers, we need to either arbitrarily pick an 4532 // operand to broadcast, or broadcast a clone of the original GEP. 4533 // Here, we broadcast a clone of the original. 4534 // 4535 // TODO: If at some point we decide to scalarize instructions having 4536 // loop-invariant operands, this special case will no longer be 4537 // required. We would add the scalarization decision to 4538 // collectLoopScalars() and teach getVectorValue() to broadcast 4539 // the lane-zero scalar value. 4540 auto *Clone = Builder.Insert(GEP->clone()); 4541 for (unsigned Part = 0; Part < UF; ++Part) { 4542 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4543 State.set(VPDef, GEP, EntryPart, Part); 4544 addMetadata(EntryPart, GEP); 4545 } 4546 } else { 4547 // If the GEP has at least one loop-varying operand, we are sure to 4548 // produce a vector of pointers. But if we are only unrolling, we want 4549 // to produce a scalar GEP for each unroll part. 
Thus, the GEP we
4550 // produce with the code below will be scalar (if VF == 1) or vector
4551 // (otherwise). Note that for the unroll-only case, we still maintain
4552 // values in the vector value map, as we do for other
4553 // instructions.
4554 for (unsigned Part = 0; Part < UF; ++Part) {
4555 // The pointer operand of the new GEP. If it's loop-invariant, we
4556 // won't broadcast it.
4557 auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0})
4558 : State.get(Operands.getOperand(0), Part);
4559
4560 // Collect all the indices for the new GEP. If any index is
4561 // loop-invariant, we won't broadcast it.
4562 SmallVector<Value *, 4> Indices;
4563 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4564 VPValue *Operand = Operands.getOperand(I);
4565 if (IsIndexLoopInvariant[I - 1])
4566 Indices.push_back(State.get(Operand, {0, 0}));
4567 else
4568 Indices.push_back(State.get(Operand, Part));
4569 }
4570
4571 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4572 // but it should be a vector otherwise.
4573 auto *NewGEP =
4574 GEP->isInBounds()
4575 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4576 Indices)
4577 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4578 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4579 "NewGEP is not a pointer vector");
4580 State.set(VPDef, GEP, NewGEP, Part);
4581 addMetadata(NewGEP, GEP);
4582 }
4583 }
4584 }
4585
4586 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4587 RecurrenceDescriptor *RdxDesc,
4588 Value *StartV, unsigned UF,
4589 ElementCount VF) {
4590 assert(!VF.isScalable() && "scalable vectors not yet supported.");
4591 PHINode *P = cast<PHINode>(PN);
4592 if (EnableVPlanNativePath) {
4593 // Currently we enter here in the VPlan-native path for non-induction
4594 // PHIs where all control flow is uniform. We simply widen these PHIs.
4595 // Create a vector phi with no operands - the vector phi operands will be
4596 // set at the end of vector code generation.
4597 Type *VecTy =
4598 (VF.isScalar()) ? PN->getType() : VectorType::get(PN->getType(), VF);
4599 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4600 VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4601 OrigPHIsToFix.push_back(P);
4602
4603 return;
4604 }
4605
4606 assert(PN->getParent() == OrigLoop->getHeader() &&
4607 "Non-header phis should have been handled elsewhere");
4608
4609 // In order to support recurrences we need to be able to vectorize Phi nodes.
4610 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4611 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4612 // this value when we vectorize all of the instructions that use the PHI.
4613 if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4614 Value *Iden = nullptr;
4615 bool ScalarPHI =
4616 (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4617 Type *VecTy =
4618 ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF);
4619
4620 if (RdxDesc) {
4621 assert(Legal->isReductionVariable(P) && StartV &&
4622 "RdxDesc should only be set for reduction variables; in that case "
4623 "a StartV is also required");
4624 RecurKind RK = RdxDesc->getRecurrenceKind();
4625 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4626 // MinMax reductions have the start value as their identity.
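// For example (illustrative, VF = 4): an integer add reduction starts its
// vector phi at <StartV, 0, 0, 0>, i.e. the identity 0 in all but the first
// lane, whereas an smax reduction must start every lane at StartV, so the
// start value itself is splatted.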
4627 if (ScalarPHI) { 4628 Iden = StartV; 4629 } else { 4630 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4631 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4632 StartV = Iden = Builder.CreateVectorSplat(VF, StartV, "minmax.ident"); 4633 } 4634 } else { 4635 Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( 4636 RK, VecTy->getScalarType()); 4637 Iden = IdenC; 4638 4639 if (!ScalarPHI) { 4640 Iden = ConstantVector::getSplat(VF, IdenC); 4641 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4642 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4643 Constant *Zero = Builder.getInt32(0); 4644 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 4645 } 4646 } 4647 } 4648 4649 for (unsigned Part = 0; Part < UF; ++Part) { 4650 // This is phase one of vectorizing PHIs. 4651 Value *EntryPart = PHINode::Create( 4652 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4653 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4654 if (StartV) { 4655 // Make sure to add the reduction start value only to the 4656 // first unroll part. 4657 Value *StartVal = (Part == 0) ? StartV : Iden; 4658 cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); 4659 } 4660 } 4661 return; 4662 } 4663 4664 assert(!Legal->isReductionVariable(P) && 4665 "reductions should be handled above"); 4666 4667 setDebugLocFromInst(Builder, P); 4668 4669 // This PHINode must be an induction variable. 4670 // Make sure that we know about it. 4671 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4672 4673 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4674 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4675 4676 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4677 // which can be found from the original scalar operations. 4678 switch (II.getKind()) { 4679 case InductionDescriptor::IK_NoInduction: 4680 llvm_unreachable("Unknown induction"); 4681 case InductionDescriptor::IK_IntInduction: 4682 case InductionDescriptor::IK_FpInduction: 4683 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4684 case InductionDescriptor::IK_PtrInduction: { 4685 // Handle the pointer induction variable case. 4686 assert(P->getType()->isPointerTy() && "Unexpected type."); 4687 4688 if (Cost->isScalarAfterVectorization(P, VF)) { 4689 // This is the normalized GEP that starts counting at zero. 4690 Value *PtrInd = 4691 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4692 // Determine the number of scalars we need to generate for each unroll 4693 // iteration. If the instruction is uniform, we only need to generate the 4694 // first lane. Otherwise, we generate all VF values. 4695 unsigned Lanes = 4696 Cost->isUniformAfterVectorization(P, VF) ? 
1 : VF.getKnownMinValue(); 4697 for (unsigned Part = 0; Part < UF; ++Part) { 4698 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4699 Constant *Idx = ConstantInt::get(PtrInd->getType(), 4700 Lane + Part * VF.getKnownMinValue()); 4701 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4702 Value *SclrGep = 4703 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4704 SclrGep->setName("next.gep"); 4705 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 4706 } 4707 } 4708 return; 4709 } 4710 assert(isa<SCEVConstant>(II.getStep()) && 4711 "Induction step not a SCEV constant!"); 4712 Type *PhiType = II.getStep()->getType(); 4713 4714 // Build a pointer phi 4715 Value *ScalarStartValue = II.getStartValue(); 4716 Type *ScStValueType = ScalarStartValue->getType(); 4717 PHINode *NewPointerPhi = 4718 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4719 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4720 4721 // A pointer induction, performed by using a gep 4722 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4723 Instruction *InductionLoc = LoopLatch->getTerminator(); 4724 const SCEV *ScalarStep = II.getStep(); 4725 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4726 Value *ScalarStepValue = 4727 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4728 Value *InductionGEP = GetElementPtrInst::Create( 4729 ScStValueType->getPointerElementType(), NewPointerPhi, 4730 Builder.CreateMul( 4731 ScalarStepValue, 4732 ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)), 4733 "ptr.ind", InductionLoc); 4734 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4735 4736 // Create UF many actual address geps that use the pointer 4737 // phi as base and a vectorized version of the step value 4738 // (<step*0, ..., step*N>) as offset. 4739 for (unsigned Part = 0; Part < UF; ++Part) { 4740 SmallVector<Constant *, 8> Indices; 4741 // Create a vector of consecutive numbers from zero to VF. 4742 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 4743 Indices.push_back( 4744 ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue())); 4745 Constant *StartOffset = ConstantVector::get(Indices); 4746 4747 Value *GEP = Builder.CreateGEP( 4748 ScStValueType->getPointerElementType(), NewPointerPhi, 4749 Builder.CreateMul( 4750 StartOffset, 4751 Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue), 4752 "vector.gep")); 4753 VectorLoopValueMap.setVectorValue(P, Part, GEP); 4754 } 4755 } 4756 } 4757 } 4758 4759 /// A helper function for checking whether an integer division-related 4760 /// instruction may divide by zero (in which case it must be predicated if 4761 /// executed conditionally in the scalar code). 4762 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4763 /// Non-zero divisors that are non compile-time constants will not be 4764 /// converted into multiplication, so we will still end up scalarizing 4765 /// the division, but can do so w/o predication. 
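// For example (illustrative):
//
//   for (int i = 0; i < n; ++i)
//     if (c[i] != 0)
//       a[i] = b[i] / c[i];
//
// When the division is scalarized and executed per lane, it must remain
// predicated because c[i] may be zero on lanes where the guard is false; with
// a non-zero constant divisor (e.g. b[i] / 7) it can safely be executed
// unconditionally.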
4766 static bool mayDivideByZero(Instruction &I) { 4767 assert((I.getOpcode() == Instruction::UDiv || 4768 I.getOpcode() == Instruction::SDiv || 4769 I.getOpcode() == Instruction::URem || 4770 I.getOpcode() == Instruction::SRem) && 4771 "Unexpected instruction"); 4772 Value *Divisor = I.getOperand(1); 4773 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4774 return !CInt || CInt->isZero(); 4775 } 4776 4777 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4778 VPUser &User, 4779 VPTransformState &State) { 4780 switch (I.getOpcode()) { 4781 case Instruction::Call: 4782 case Instruction::Br: 4783 case Instruction::PHI: 4784 case Instruction::GetElementPtr: 4785 case Instruction::Select: 4786 llvm_unreachable("This instruction is handled by a different recipe."); 4787 case Instruction::UDiv: 4788 case Instruction::SDiv: 4789 case Instruction::SRem: 4790 case Instruction::URem: 4791 case Instruction::Add: 4792 case Instruction::FAdd: 4793 case Instruction::Sub: 4794 case Instruction::FSub: 4795 case Instruction::FNeg: 4796 case Instruction::Mul: 4797 case Instruction::FMul: 4798 case Instruction::FDiv: 4799 case Instruction::FRem: 4800 case Instruction::Shl: 4801 case Instruction::LShr: 4802 case Instruction::AShr: 4803 case Instruction::And: 4804 case Instruction::Or: 4805 case Instruction::Xor: { 4806 // Just widen unops and binops. 4807 setDebugLocFromInst(Builder, &I); 4808 4809 for (unsigned Part = 0; Part < UF; ++Part) { 4810 SmallVector<Value *, 2> Ops; 4811 for (VPValue *VPOp : User.operands()) 4812 Ops.push_back(State.get(VPOp, Part)); 4813 4814 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4815 4816 if (auto *VecOp = dyn_cast<Instruction>(V)) 4817 VecOp->copyIRFlags(&I); 4818 4819 // Use this vector value for all users of the original instruction. 4820 State.set(Def, &I, V, Part); 4821 addMetadata(V, &I); 4822 } 4823 4824 break; 4825 } 4826 case Instruction::ICmp: 4827 case Instruction::FCmp: { 4828 // Widen compares. Generate vector compares. 4829 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4830 auto *Cmp = cast<CmpInst>(&I); 4831 setDebugLocFromInst(Builder, Cmp); 4832 for (unsigned Part = 0; Part < UF; ++Part) { 4833 Value *A = State.get(User.getOperand(0), Part); 4834 Value *B = State.get(User.getOperand(1), Part); 4835 Value *C = nullptr; 4836 if (FCmp) { 4837 // Propagate fast math flags. 4838 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4839 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4840 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4841 } else { 4842 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4843 } 4844 State.set(Def, &I, C, Part); 4845 addMetadata(C, &I); 4846 } 4847 4848 break; 4849 } 4850 4851 case Instruction::ZExt: 4852 case Instruction::SExt: 4853 case Instruction::FPToUI: 4854 case Instruction::FPToSI: 4855 case Instruction::FPExt: 4856 case Instruction::PtrToInt: 4857 case Instruction::IntToPtr: 4858 case Instruction::SIToFP: 4859 case Instruction::UIToFP: 4860 case Instruction::Trunc: 4861 case Instruction::FPTrunc: 4862 case Instruction::BitCast: { 4863 auto *CI = cast<CastInst>(&I); 4864 setDebugLocFromInst(Builder, CI); 4865 4866 /// Vectorize casts. 4867 Type *DestTy = 4868 (VF.isScalar()) ? 
CI->getType() : VectorType::get(CI->getType(), VF); 4869 4870 for (unsigned Part = 0; Part < UF; ++Part) { 4871 Value *A = State.get(User.getOperand(0), Part); 4872 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4873 State.set(Def, &I, Cast, Part); 4874 addMetadata(Cast, &I); 4875 } 4876 break; 4877 } 4878 default: 4879 // This instruction is not vectorized by simple widening. 4880 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4881 llvm_unreachable("Unhandled instruction!"); 4882 } // end of switch. 4883 } 4884 4885 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4886 VPUser &ArgOperands, 4887 VPTransformState &State) { 4888 assert(!isa<DbgInfoIntrinsic>(I) && 4889 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4890 setDebugLocFromInst(Builder, &I); 4891 4892 Module *M = I.getParent()->getParent()->getParent(); 4893 auto *CI = cast<CallInst>(&I); 4894 4895 SmallVector<Type *, 4> Tys; 4896 for (Value *ArgOperand : CI->arg_operands()) 4897 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4898 4899 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4900 4901 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4902 // version of the instruction. 4903 // Is it beneficial to perform intrinsic call compared to lib call? 4904 bool NeedToScalarize = false; 4905 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4906 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4907 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4908 assert((UseVectorIntrinsic || !NeedToScalarize) && 4909 "Instruction should be scalarized elsewhere."); 4910 assert(IntrinsicCost.isValid() && CallCost.isValid() && 4911 "Cannot have invalid costs while widening"); 4912 4913 for (unsigned Part = 0; Part < UF; ++Part) { 4914 SmallVector<Value *, 4> Args; 4915 for (auto &I : enumerate(ArgOperands.operands())) { 4916 // Some intrinsics have a scalar argument - don't replace it with a 4917 // vector. 4918 Value *Arg; 4919 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4920 Arg = State.get(I.value(), Part); 4921 else 4922 Arg = State.get(I.value(), {0, 0}); 4923 Args.push_back(Arg); 4924 } 4925 4926 Function *VectorF; 4927 if (UseVectorIntrinsic) { 4928 // Use vector version of the intrinsic. 4929 Type *TysForDecl[] = {CI->getType()}; 4930 if (VF.isVector()) { 4931 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 4932 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4933 } 4934 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4935 assert(VectorF && "Can't retrieve vector intrinsic."); 4936 } else { 4937 // Use vector version of the function call. 
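// For example (illustrative): with a vector math library mapped in via the
// TLI/InjectTLIMappings machinery, a call to sinf at VF = 4 may resolve to a
// vector-function-ABI variant such as _ZGVbN4v_sinf; VFDatabase looks that
// variant up using the VFShape requested below.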
4938 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4939 #ifndef NDEBUG 4940 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4941 "Can't create vector function."); 4942 #endif 4943 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4944 } 4945 SmallVector<OperandBundleDef, 1> OpBundles; 4946 CI->getOperandBundlesAsDefs(OpBundles); 4947 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4948 4949 if (isa<FPMathOperator>(V)) 4950 V->copyFastMathFlags(CI); 4951 4952 State.set(Def, &I, V, Part); 4953 addMetadata(V, &I); 4954 } 4955 } 4956 4957 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 4958 VPUser &Operands, 4959 bool InvariantCond, 4960 VPTransformState &State) { 4961 setDebugLocFromInst(Builder, &I); 4962 4963 // The condition can be loop invariant but still defined inside the 4964 // loop. This means that we can't just use the original 'cond' value. 4965 // We have to take the 'vectorized' value and pick the first lane. 4966 // Instcombine will make this a no-op. 4967 auto *InvarCond = 4968 InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr; 4969 4970 for (unsigned Part = 0; Part < UF; ++Part) { 4971 Value *Cond = 4972 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 4973 Value *Op0 = State.get(Operands.getOperand(1), Part); 4974 Value *Op1 = State.get(Operands.getOperand(2), Part); 4975 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 4976 State.set(VPDef, &I, Sel, Part); 4977 addMetadata(Sel, &I); 4978 } 4979 } 4980 4981 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4982 // We should not collect Scalars more than once per VF. Right now, this 4983 // function is called from collectUniformsAndScalars(), which already does 4984 // this check. Collecting Scalars for VF=1 does not make any sense. 4985 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4986 "This function should not be visited twice for the same VF"); 4987 4988 SmallSetVector<Instruction *, 8> Worklist; 4989 4990 // These sets are used to seed the analysis with pointers used by memory 4991 // accesses that will remain scalar. 4992 SmallSetVector<Instruction *, 8> ScalarPtrs; 4993 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4994 auto *Latch = TheLoop->getLoopLatch(); 4995 4996 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4997 // The pointer operands of loads and stores will be scalar as long as the 4998 // memory access is not a gather or scatter operation. The value operand of a 4999 // store will remain scalar if the store is scalarized. 5000 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5001 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5002 assert(WideningDecision != CM_Unknown && 5003 "Widening decision should be ready at this moment"); 5004 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5005 if (Ptr == Store->getValueOperand()) 5006 return WideningDecision == CM_Scalarize; 5007 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5008 "Ptr is neither a value or pointer operand"); 5009 return WideningDecision != CM_GatherScatter; 5010 }; 5011 5012 // A helper that returns true if the given value is a bitcast or 5013 // getelementptr instruction contained in the loop. 
5014 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5015 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5016 isa<GetElementPtrInst>(V)) && 5017 !TheLoop->isLoopInvariant(V); 5018 }; 5019 5020 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5021 if (!isa<PHINode>(Ptr) || 5022 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5023 return false; 5024 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5025 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5026 return false; 5027 return isScalarUse(MemAccess, Ptr); 5028 }; 5029 5030 // A helper that evaluates a memory access's use of a pointer. If the 5031 // pointer is actually the pointer induction of a loop, it is being 5032 // inserted into Worklist. If the use will be a scalar use, and the 5033 // pointer is only used by memory accesses, we place the pointer in 5034 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5035 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5036 if (isScalarPtrInduction(MemAccess, Ptr)) { 5037 Worklist.insert(cast<Instruction>(Ptr)); 5038 Instruction *Update = cast<Instruction>( 5039 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5040 Worklist.insert(Update); 5041 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5042 << "\n"); 5043 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update 5044 << "\n"); 5045 return; 5046 } 5047 // We only care about bitcast and getelementptr instructions contained in 5048 // the loop. 5049 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5050 return; 5051 5052 // If the pointer has already been identified as scalar (e.g., if it was 5053 // also identified as uniform), there's nothing to do. 5054 auto *I = cast<Instruction>(Ptr); 5055 if (Worklist.count(I)) 5056 return; 5057 5058 // If the use of the pointer will be a scalar use, and all users of the 5059 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5060 // place the pointer in PossibleNonScalarPtrs. 5061 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5062 return isa<LoadInst>(U) || isa<StoreInst>(U); 5063 })) 5064 ScalarPtrs.insert(I); 5065 else 5066 PossibleNonScalarPtrs.insert(I); 5067 }; 5068 5069 // We seed the scalars analysis with three classes of instructions: (1) 5070 // instructions marked uniform-after-vectorization and (2) bitcast, 5071 // getelementptr and (pointer) phi instructions used by memory accesses 5072 // requiring a scalar use. 5073 // 5074 // (1) Add to the worklist all instructions that have been identified as 5075 // uniform-after-vectorization. 5076 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5077 5078 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5079 // memory accesses requiring a scalar use. The pointer operands of loads and 5080 // stores will be scalar as long as the memory accesses is not a gather or 5081 // scatter operation. The value operand of a store will remain scalar if the 5082 // store is scalarized. 
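// For example (illustrative): for a scalarized store a[i] = x, the
// getelementptr producing &a[i] is used only by that store, so it is placed in
// ScalarPtrs; if the same GEP also had a non-memory user, it would be placed
// in PossibleNonScalarPtrs instead and not be seeded as scalar.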
5083 for (auto *BB : TheLoop->blocks()) 5084 for (auto &I : *BB) { 5085 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5086 evaluatePtrUse(Load, Load->getPointerOperand()); 5087 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5088 evaluatePtrUse(Store, Store->getPointerOperand()); 5089 evaluatePtrUse(Store, Store->getValueOperand()); 5090 } 5091 } 5092 for (auto *I : ScalarPtrs) 5093 if (!PossibleNonScalarPtrs.count(I)) { 5094 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5095 Worklist.insert(I); 5096 } 5097 5098 // Insert the forced scalars. 5099 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5100 // induction variable when the PHI user is scalarized. 5101 auto ForcedScalar = ForcedScalars.find(VF); 5102 if (ForcedScalar != ForcedScalars.end()) 5103 for (auto *I : ForcedScalar->second) 5104 Worklist.insert(I); 5105 5106 // Expand the worklist by looking through any bitcasts and getelementptr 5107 // instructions we've already identified as scalar. This is similar to the 5108 // expansion step in collectLoopUniforms(); however, here we're only 5109 // expanding to include additional bitcasts and getelementptr instructions. 5110 unsigned Idx = 0; 5111 while (Idx != Worklist.size()) { 5112 Instruction *Dst = Worklist[Idx++]; 5113 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5114 continue; 5115 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5116 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5117 auto *J = cast<Instruction>(U); 5118 return !TheLoop->contains(J) || Worklist.count(J) || 5119 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5120 isScalarUse(J, Src)); 5121 })) { 5122 Worklist.insert(Src); 5123 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5124 } 5125 } 5126 5127 // An induction variable will remain scalar if all users of the induction 5128 // variable and induction variable update remain scalar. 5129 for (auto &Induction : Legal->getInductionVars()) { 5130 auto *Ind = Induction.first; 5131 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5132 5133 // If tail-folding is applied, the primary induction variable will be used 5134 // to feed a vector compare. 5135 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5136 continue; 5137 5138 // Determine if all users of the induction variable are scalar after 5139 // vectorization. 5140 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5141 auto *I = cast<Instruction>(U); 5142 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5143 }); 5144 if (!ScalarInd) 5145 continue; 5146 5147 // Determine if all users of the induction variable update instruction are 5148 // scalar after vectorization. 5149 auto ScalarIndUpdate = 5150 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5151 auto *I = cast<Instruction>(U); 5152 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5153 }); 5154 if (!ScalarIndUpdate) 5155 continue; 5156 5157 // The induction variable and its update instruction will remain scalar. 
5158 Worklist.insert(Ind); 5159 Worklist.insert(IndUpdate); 5160 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5161 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5162 << "\n"); 5163 } 5164 5165 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5166 } 5167 5168 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, 5169 ElementCount VF) { 5170 if (!blockNeedsPredication(I->getParent())) 5171 return false; 5172 switch(I->getOpcode()) { 5173 default: 5174 break; 5175 case Instruction::Load: 5176 case Instruction::Store: { 5177 if (!Legal->isMaskRequired(I)) 5178 return false; 5179 auto *Ptr = getLoadStorePointerOperand(I); 5180 auto *Ty = getMemInstValueType(I); 5181 // We have already decided how to vectorize this instruction, get that 5182 // result. 5183 if (VF.isVector()) { 5184 InstWidening WideningDecision = getWideningDecision(I, VF); 5185 assert(WideningDecision != CM_Unknown && 5186 "Widening decision should be ready at this moment"); 5187 return WideningDecision == CM_Scalarize; 5188 } 5189 const Align Alignment = getLoadStoreAlignment(I); 5190 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5191 isLegalMaskedGather(Ty, Alignment)) 5192 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5193 isLegalMaskedScatter(Ty, Alignment)); 5194 } 5195 case Instruction::UDiv: 5196 case Instruction::SDiv: 5197 case Instruction::SRem: 5198 case Instruction::URem: 5199 return mayDivideByZero(*I); 5200 } 5201 return false; 5202 } 5203 5204 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5205 Instruction *I, ElementCount VF) { 5206 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5207 assert(getWideningDecision(I, VF) == CM_Unknown && 5208 "Decision should not be set yet."); 5209 auto *Group = getInterleavedAccessGroup(I); 5210 assert(Group && "Must have a group."); 5211 5212 // If the instruction's allocated size doesn't equal it's type size, it 5213 // requires padding and will be scalarized. 5214 auto &DL = I->getModule()->getDataLayout(); 5215 auto *ScalarTy = getMemInstValueType(I); 5216 if (hasIrregularType(ScalarTy, DL, VF)) 5217 return false; 5218 5219 // Check if masking is required. 5220 // A Group may need masking for one of two reasons: it resides in a block that 5221 // needs predication, or it was decided to use masking to deal with gaps. 5222 bool PredicatedAccessRequiresMasking = 5223 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5224 bool AccessWithGapsRequiresMasking = 5225 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5226 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 5227 return true; 5228 5229 // If masked interleaving is required, we expect that the user/target had 5230 // enabled it, because otherwise it either wouldn't have been created or 5231 // it should have been invalidated by the CostModel. 5232 assert(useMaskedInterleavedAccesses(TTI) && 5233 "Masked interleave-groups for predicated accesses are not enabled."); 5234 5235 auto *Ty = getMemInstValueType(I); 5236 const Align Alignment = getLoadStoreAlignment(I); 5237 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5238 : TTI.isLegalMaskedStore(Ty, Alignment); 5239 } 5240 5241 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5242 Instruction *I, ElementCount VF) { 5243 // Get and ensure we have a valid memory instruction. 
5244 LoadInst *LI = dyn_cast<LoadInst>(I); 5245 StoreInst *SI = dyn_cast<StoreInst>(I); 5246 assert((LI || SI) && "Invalid memory instruction"); 5247 5248 auto *Ptr = getLoadStorePointerOperand(I); 5249 5250 // In order to be widened, the pointer should be consecutive, first of all. 5251 if (!Legal->isConsecutivePtr(Ptr)) 5252 return false; 5253 5254 // If the instruction is a store located in a predicated block, it will be 5255 // scalarized. 5256 if (isScalarWithPredication(I)) 5257 return false; 5258 5259 // If the instruction's allocated size doesn't equal it's type size, it 5260 // requires padding and will be scalarized. 5261 auto &DL = I->getModule()->getDataLayout(); 5262 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5263 if (hasIrregularType(ScalarTy, DL, VF)) 5264 return false; 5265 5266 return true; 5267 } 5268 5269 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5270 // We should not collect Uniforms more than once per VF. Right now, 5271 // this function is called from collectUniformsAndScalars(), which 5272 // already does this check. Collecting Uniforms for VF=1 does not make any 5273 // sense. 5274 5275 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5276 "This function should not be visited twice for the same VF"); 5277 5278 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5279 // not analyze again. Uniforms.count(VF) will return 1. 5280 Uniforms[VF].clear(); 5281 5282 // We now know that the loop is vectorizable! 5283 // Collect instructions inside the loop that will remain uniform after 5284 // vectorization. 5285 5286 // Global values, params and instructions outside of current loop are out of 5287 // scope. 5288 auto isOutOfScope = [&](Value *V) -> bool { 5289 Instruction *I = dyn_cast<Instruction>(V); 5290 return (!I || !TheLoop->contains(I)); 5291 }; 5292 5293 SetVector<Instruction *> Worklist; 5294 BasicBlock *Latch = TheLoop->getLoopLatch(); 5295 5296 // Instructions that are scalar with predication must not be considered 5297 // uniform after vectorization, because that would create an erroneous 5298 // replicating region where only a single instance out of VF should be formed. 5299 // TODO: optimize such seldom cases if found important, see PR40816. 5300 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5301 if (isOutOfScope(I)) { 5302 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5303 << *I << "\n"); 5304 return; 5305 } 5306 if (isScalarWithPredication(I, VF)) { 5307 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5308 << *I << "\n"); 5309 return; 5310 } 5311 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5312 Worklist.insert(I); 5313 }; 5314 5315 // Start with the conditional branch. If the branch condition is an 5316 // instruction contained in the loop that is only used by the branch, it is 5317 // uniform. 5318 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5319 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5320 addToWorklistIfAllowed(Cmp); 5321 5322 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5323 InstWidening WideningDecision = getWideningDecision(I, VF); 5324 assert(WideningDecision != CM_Unknown && 5325 "Widening decision should be ready at this moment"); 5326 5327 // A uniform memory op is itself uniform. We exclude uniform stores 5328 // here as they demand the last lane, not the first one. 
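// For example (illustrative): a load from a loop-invariant address reads the
// same value on every lane, so only lane 0 is needed and the load itself is
// uniform; a store to an invariant address must instead keep the value of the
// last lane, which is why uniform stores are excluded here.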
5329 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5330 assert(WideningDecision == CM_Scalarize); 5331 return true; 5332 } 5333 5334 return (WideningDecision == CM_Widen || 5335 WideningDecision == CM_Widen_Reverse || 5336 WideningDecision == CM_Interleave); 5337 }; 5338 5339 5340 // Returns true if Ptr is the pointer operand of a memory access instruction 5341 // I, and I is known to not require scalarization. 5342 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5343 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5344 }; 5345 5346 // Holds a list of values which are known to have at least one uniform use. 5347 // Note that there may be other uses which aren't uniform. A "uniform use" 5348 // here is something which only demands lane 0 of the unrolled iterations; 5349 // it does not imply that all lanes produce the same value (e.g. this is not 5350 // the usual meaning of uniform) 5351 SmallPtrSet<Value *, 8> HasUniformUse; 5352 5353 // Scan the loop for instructions which are either a) known to have only 5354 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5355 for (auto *BB : TheLoop->blocks()) 5356 for (auto &I : *BB) { 5357 // If there's no pointer operand, there's nothing to do. 5358 auto *Ptr = getLoadStorePointerOperand(&I); 5359 if (!Ptr) 5360 continue; 5361 5362 // A uniform memory op is itself uniform. We exclude uniform stores 5363 // here as they demand the last lane, not the first one. 5364 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5365 addToWorklistIfAllowed(&I); 5366 5367 if (isUniformDecision(&I, VF)) { 5368 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5369 HasUniformUse.insert(Ptr); 5370 } 5371 } 5372 5373 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5374 // demanding) users. Since loops are assumed to be in LCSSA form, this 5375 // disallows uses outside the loop as well. 5376 for (auto *V : HasUniformUse) { 5377 if (isOutOfScope(V)) 5378 continue; 5379 auto *I = cast<Instruction>(V); 5380 auto UsersAreMemAccesses = 5381 llvm::all_of(I->users(), [&](User *U) -> bool { 5382 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5383 }); 5384 if (UsersAreMemAccesses) 5385 addToWorklistIfAllowed(I); 5386 } 5387 5388 // Expand Worklist in topological order: whenever a new instruction 5389 // is added , its users should be already inside Worklist. It ensures 5390 // a uniform instruction will only be used by uniform instructions. 5391 unsigned idx = 0; 5392 while (idx != Worklist.size()) { 5393 Instruction *I = Worklist[idx++]; 5394 5395 for (auto OV : I->operand_values()) { 5396 // isOutOfScope operands cannot be uniform instructions. 5397 if (isOutOfScope(OV)) 5398 continue; 5399 // First order recurrence Phi's should typically be considered 5400 // non-uniform. 5401 auto *OP = dyn_cast<PHINode>(OV); 5402 if (OP && Legal->isFirstOrderRecurrence(OP)) 5403 continue; 5404 // If all the users of the operand are uniform, then add the 5405 // operand into the uniform worklist. 5406 auto *OI = cast<Instruction>(OV); 5407 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5408 auto *J = cast<Instruction>(U); 5409 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5410 })) 5411 addToWorklistIfAllowed(OI); 5412 } 5413 } 5414 5415 // For an instruction to be added into Worklist above, all its users inside 5416 // the loop should also be in Worklist. 
However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

Optional<ElementCount>
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to insert the check anyway, since it is still
    // likely to be dynamically uniform if the target can skip it.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return MaxVF;
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
    if (runtimeChecksRequired())
      return None;

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. Otherwise we'd have to handle
  // the fact that not every instruction executes on the last iteration. This
  // would require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to a vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return MaxVF;
    }
    return None;
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  assert(!MaxVF.isScalable() &&
         "Scalable vectors do not yet support tail folding");
  assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
         "MaxVF must be a power of 2");
  unsigned MaxVFtimesIC =
      UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
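  // (Worked example with made-up numbers: for a known trip count of 16,
  // MaxVF = 4 and UserIC = 2, the remainder 16 % (4 * 2) is 0, so no tail
  // iterations remain and MaxVF is returned without folding.)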
5575 ScalarEvolution *SE = PSE.getSE(); 5576 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5577 const SCEV *ExitCount = SE->getAddExpr( 5578 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5579 const SCEV *Rem = SE->getURemExpr( 5580 ExitCount, SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5581 if (Rem->isZero()) { 5582 // Accept MaxVF if we do not have a tail. 5583 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5584 return MaxVF; 5585 } 5586 5587 // If we don't know the precise trip count, or if the trip count that we 5588 // found modulo the vectorization factor is not zero, try to fold the tail 5589 // by masking. 5590 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5591 if (Legal->prepareToFoldTailByMasking()) { 5592 FoldTailByMasking = true; 5593 return MaxVF; 5594 } 5595 5596 // If there was a tail-folding hint/switch, but we can't fold the tail by 5597 // masking, fallback to a vectorization with a scalar epilogue. 5598 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5599 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5600 "scalar epilogue instead.\n"); 5601 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5602 return MaxVF; 5603 } 5604 5605 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5606 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5607 return None; 5608 } 5609 5610 if (TC == 0) { 5611 reportVectorizationFailure( 5612 "Unable to calculate the loop count due to complex control flow", 5613 "unable to calculate the loop count due to complex control flow", 5614 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5615 return None; 5616 } 5617 5618 reportVectorizationFailure( 5619 "Cannot optimize for size and vectorize at the same time.", 5620 "cannot optimize for size and vectorize at the same time. " 5621 "Enable vectorization of this loop with '#pragma clang loop " 5622 "vectorize(enable)' when compiling with -Os/-Oz", 5623 "NoTailLoopWithOptForSize", ORE, TheLoop); 5624 return None; 5625 } 5626 5627 ElementCount 5628 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5629 ElementCount UserVF) { 5630 bool IgnoreScalableUserVF = UserVF.isScalable() && 5631 !TTI.supportsScalableVectors() && 5632 !ForceTargetSupportsScalableVectors; 5633 if (IgnoreScalableUserVF) { 5634 LLVM_DEBUG( 5635 dbgs() << "LV: Ignoring VF=" << UserVF 5636 << " because target does not support scalable vectors.\n"); 5637 ORE->emit([&]() { 5638 return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF", 5639 TheLoop->getStartLoc(), 5640 TheLoop->getHeader()) 5641 << "Ignoring VF=" << ore::NV("UserVF", UserVF) 5642 << " because target does not support scalable vectors."; 5643 }); 5644 } 5645 5646 // Beyond this point two scenarios are handled. If UserVF isn't specified 5647 // then a suitable VF is chosen. If UserVF is specified and there are 5648 // dependencies, check if it's legal. However, if a UserVF is specified and 5649 // there are no dependencies, then there's nothing to do. 
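  // In other words: a user-specified VF (unless ignored above) is honored
  // as-is when the loop has no restricting memory dependences; otherwise it
  // is checked against, and if necessary clamped to, the maximum safe VF
  // computed below.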
  if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
      Legal->isSafeForAnyVectorWidth())
    return UserVF;

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where the type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();

  // If the user vectorization factor is legally unsafe, clamp it to a safe
  // value. Otherwise, return it as is.
  if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
    unsigned MaxSafeElements =
        PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
    ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);

    if (UserVF.isScalable()) {
      Optional<unsigned> MaxVScale = TTI.getMaxVScale();

      // Scale VF by vscale before checking if it's safe.
      MaxSafeVF = ElementCount::getScalable(
          MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);

      if (MaxSafeVF.isZero()) {
        // The dependence distance is too small to use scalable vectors; fall
        // back on fixed-width vectorization.
        LLVM_DEBUG(
            dbgs()
            << "LV: Max legal vector width too small, scalable vectorization "
               "unfeasible. Using fixed-width vectorization instead.\n");
        ORE->emit([&]() {
          return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
                                            TheLoop->getStartLoc(),
                                            TheLoop->getHeader())
                 << "Max legal vector width too small, scalable vectorization "
                 << "unfeasible. Using fixed-width vectorization instead.";
        });
        return computeFeasibleMaxVF(
            ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
      }
    }

    LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");

    if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
      return UserVF;

    LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                      << " is unsafe, clamping to max safe VF=" << MaxSafeVF
                      << ".\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe, clamping to maximum safe vectorization factor "
             << ore::NV("VectorizationFactor", MaxSafeVF);
    });
    return MaxSafeVF;
  }

  WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that neither WidestRegister nor WidestType is necessarily a power
  // of 2.
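  // (Worked example with made-up numbers: with 128-bit registers and a widest
  // element type of 32 bits, MaxVectorSize = PowerOf2Floor(128 / 32) = 4.)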
5722 unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType); 5723 5724 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5725 << " / " << WidestType << " bits.\n"); 5726 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5727 << WidestRegister << " bits.\n"); 5728 5729 assert(MaxVectorSize <= WidestRegister && 5730 "Did not expect to pack so many elements" 5731 " into one vector!"); 5732 if (MaxVectorSize == 0) { 5733 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5734 MaxVectorSize = 1; 5735 return ElementCount::getFixed(MaxVectorSize); 5736 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 5737 isPowerOf2_32(ConstTripCount)) { 5738 // We need to clamp the VF to be the ConstTripCount. There is no point in 5739 // choosing a higher viable VF as done in the loop below. 5740 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5741 << ConstTripCount << "\n"); 5742 MaxVectorSize = ConstTripCount; 5743 return ElementCount::getFixed(MaxVectorSize); 5744 } 5745 5746 unsigned MaxVF = MaxVectorSize; 5747 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) || 5748 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5749 // Collect all viable vectorization factors larger than the default MaxVF 5750 // (i.e. MaxVectorSize). 5751 SmallVector<ElementCount, 8> VFs; 5752 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5753 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 5754 VFs.push_back(ElementCount::getFixed(VS)); 5755 5756 // For each VF calculate its register usage. 5757 auto RUs = calculateRegisterUsage(VFs); 5758 5759 // Select the largest VF which doesn't require more registers than existing 5760 // ones. 5761 for (int i = RUs.size() - 1; i >= 0; --i) { 5762 bool Selected = true; 5763 for (auto& pair : RUs[i].MaxLocalUsers) { 5764 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5765 if (pair.second > TargetNumRegisters) 5766 Selected = false; 5767 } 5768 if (Selected) { 5769 MaxVF = VFs[i].getKnownMinValue(); 5770 break; 5771 } 5772 } 5773 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 5774 if (MaxVF < MinVF) { 5775 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5776 << ") with target's minimum: " << MinVF << '\n'); 5777 MaxVF = MinVF; 5778 } 5779 } 5780 } 5781 return ElementCount::getFixed(MaxVF); 5782 } 5783 5784 VectorizationFactor 5785 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) { 5786 // FIXME: This can be fixed for scalable vectors later, because at this stage 5787 // the LoopVectorizer will only consider vectorizing a loop with scalable 5788 // vectors when the loop has a hint to enable vectorization for a given VF. 5789 assert(!MaxVF.isScalable() && "scalable vectors not yet supported"); 5790 5791 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5792 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5793 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5794 5795 unsigned Width = 1; 5796 const float ScalarCost = *ExpectedCost.getValue(); 5797 float Cost = ScalarCost; 5798 5799 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5800 if (ForceVectorization && MaxVF.isVector()) { 5801 // Ignore scalar width, because the user explicitly wants vectorization. 5802 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5803 // evaluation. 
5804 Cost = std::numeric_limits<float>::max(); 5805 } 5806 5807 for (unsigned i = 2; i <= MaxVF.getFixedValue(); i *= 2) { 5808 // Notice that the vector loop needs to be executed less times, so 5809 // we need to divide the cost of the vector loops by the width of 5810 // the vector elements. 5811 VectorizationCostTy C = expectedCost(ElementCount::getFixed(i)); 5812 assert(C.first.isValid() && "Unexpected invalid cost for vector loop"); 5813 float VectorCost = *C.first.getValue() / (float)i; 5814 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5815 << " costs: " << (int)VectorCost << ".\n"); 5816 if (!C.second && !ForceVectorization) { 5817 LLVM_DEBUG( 5818 dbgs() << "LV: Not considering vector loop of width " << i 5819 << " because it will not generate any vector instructions.\n"); 5820 continue; 5821 } 5822 5823 // If profitable add it to ProfitableVF list. 5824 if (VectorCost < ScalarCost) { 5825 ProfitableVFs.push_back(VectorizationFactor( 5826 {ElementCount::getFixed(i), (unsigned)VectorCost})); 5827 } 5828 5829 if (VectorCost < Cost) { 5830 Cost = VectorCost; 5831 Width = i; 5832 } 5833 } 5834 5835 if (!EnableCondStoresVectorization && NumPredStores) { 5836 reportVectorizationFailure("There are conditional stores.", 5837 "store that is conditionally executed prevents vectorization", 5838 "ConditionalStore", ORE, TheLoop); 5839 Width = 1; 5840 Cost = ScalarCost; 5841 } 5842 5843 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5844 << "LV: Vectorization seems to be not beneficial, " 5845 << "but was forced by a user.\n"); 5846 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5847 VectorizationFactor Factor = {ElementCount::getFixed(Width), 5848 (unsigned)(Width * Cost)}; 5849 return Factor; 5850 } 5851 5852 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5853 const Loop &L, ElementCount VF) const { 5854 // Cross iteration phis such as reductions need special handling and are 5855 // currently unsupported. 5856 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 5857 return Legal->isFirstOrderRecurrence(&Phi) || 5858 Legal->isReductionVariable(&Phi); 5859 })) 5860 return false; 5861 5862 // Phis with uses outside of the loop require special handling and are 5863 // currently unsupported. 5864 for (auto &Entry : Legal->getInductionVars()) { 5865 // Look for uses of the value of the induction at the last iteration. 5866 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5867 for (User *U : PostInc->users()) 5868 if (!L.contains(cast<Instruction>(U))) 5869 return false; 5870 // Look for uses of penultimate value of the induction. 5871 for (User *U : Entry.first->users()) 5872 if (!L.contains(cast<Instruction>(U))) 5873 return false; 5874 } 5875 5876 // Induction variables that are widened require special handling that is 5877 // currently not supported. 5878 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5879 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5880 this->isProfitableToScalarize(Entry.first, VF)); 5881 })) 5882 return false; 5883 5884 return true; 5885 } 5886 5887 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5888 const ElementCount VF) const { 5889 // FIXME: We need a much better cost-model to take different parameters such 5890 // as register pressure, code size increase and cost of extra branches into 5891 // account. 
For now we apply a very crude heuristic and only consider loops 5892 // with vectorization factors larger than a certain value. 5893 // We also consider epilogue vectorization unprofitable for targets that don't 5894 // consider interleaving beneficial (eg. MVE). 5895 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5896 return false; 5897 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 5898 return true; 5899 return false; 5900 } 5901 5902 VectorizationFactor 5903 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5904 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5905 VectorizationFactor Result = VectorizationFactor::Disabled(); 5906 if (!EnableEpilogueVectorization) { 5907 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5908 return Result; 5909 } 5910 5911 if (!isScalarEpilogueAllowed()) { 5912 LLVM_DEBUG( 5913 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5914 "allowed.\n";); 5915 return Result; 5916 } 5917 5918 // FIXME: This can be fixed for scalable vectors later, because at this stage 5919 // the LoopVectorizer will only consider vectorizing a loop with scalable 5920 // vectors when the loop has a hint to enable vectorization for a given VF. 5921 if (MainLoopVF.isScalable()) { 5922 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " 5923 "yet supported.\n"); 5924 return Result; 5925 } 5926 5927 // Not really a cost consideration, but check for unsupported cases here to 5928 // simplify the logic. 5929 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5930 LLVM_DEBUG( 5931 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5932 "not a supported candidate.\n";); 5933 return Result; 5934 } 5935 5936 if (EpilogueVectorizationForceVF > 1) { 5937 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5938 if (LVP.hasPlanWithVFs( 5939 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 5940 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 5941 else { 5942 LLVM_DEBUG( 5943 dbgs() 5944 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5945 return Result; 5946 } 5947 } 5948 5949 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5950 TheLoop->getHeader()->getParent()->hasMinSize()) { 5951 LLVM_DEBUG( 5952 dbgs() 5953 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5954 return Result; 5955 } 5956 5957 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 5958 return Result; 5959 5960 for (auto &NextVF : ProfitableVFs) 5961 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 5962 (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) && 5963 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 5964 Result = NextVF; 5965 5966 if (Result != VectorizationFactor::Disabled()) 5967 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5968 << Result.Width.getFixedValue() << "\n";); 5969 return Result; 5970 } 5971 5972 std::pair<unsigned, unsigned> 5973 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5974 unsigned MinWidth = -1U; 5975 unsigned MaxWidth = 8; 5976 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5977 5978 // For each block. 5979 for (BasicBlock *BB : TheLoop->blocks()) { 5980 // For each instruction in the loop. 5981 for (Instruction &I : BB->instructionsWithoutDebug()) { 5982 Type *T = I.getType(); 5983 5984 // Skip ignored values. 
5985 if (ValuesToIgnore.count(&I)) 5986 continue; 5987 5988 // Only examine Loads, Stores and PHINodes. 5989 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5990 continue; 5991 5992 // Examine PHI nodes that are reduction variables. Update the type to 5993 // account for the recurrence type. 5994 if (auto *PN = dyn_cast<PHINode>(&I)) { 5995 if (!Legal->isReductionVariable(PN)) 5996 continue; 5997 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN]; 5998 if (PreferInLoopReductions || 5999 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6000 RdxDesc.getRecurrenceType(), 6001 TargetTransformInfo::ReductionFlags())) 6002 continue; 6003 T = RdxDesc.getRecurrenceType(); 6004 } 6005 6006 // Examine the stored values. 6007 if (auto *ST = dyn_cast<StoreInst>(&I)) 6008 T = ST->getValueOperand()->getType(); 6009 6010 // Ignore loaded pointer types and stored pointer types that are not 6011 // vectorizable. 6012 // 6013 // FIXME: The check here attempts to predict whether a load or store will 6014 // be vectorized. We only know this for certain after a VF has 6015 // been selected. Here, we assume that if an access can be 6016 // vectorized, it will be. We should also look at extending this 6017 // optimization to non-pointer types. 6018 // 6019 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6020 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6021 continue; 6022 6023 MinWidth = std::min(MinWidth, 6024 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6025 MaxWidth = std::max(MaxWidth, 6026 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6027 } 6028 } 6029 6030 return {MinWidth, MaxWidth}; 6031 } 6032 6033 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6034 unsigned LoopCost) { 6035 // -- The interleave heuristics -- 6036 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6037 // There are many micro-architectural considerations that we can't predict 6038 // at this level. For example, frontend pressure (on decode or fetch) due to 6039 // code size, or the number and capabilities of the execution ports. 6040 // 6041 // We use the following heuristics to select the interleave count: 6042 // 1. If the code has reductions, then we interleave to break the cross 6043 // iteration dependency. 6044 // 2. If the loop is really small, then we interleave to reduce the loop 6045 // overhead. 6046 // 3. We don't interleave if we think that we will spill registers to memory 6047 // due to the increased register pressure. 6048 6049 if (!isScalarEpilogueAllowed()) 6050 return 1; 6051 6052 // We used the distance for the interleave count. 6053 if (Legal->getMaxSafeDepDistBytes() != -1U) 6054 return 1; 6055 6056 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6057 const bool HasReductions = !Legal->getReductionVars().empty(); 6058 // Do not interleave loops with a relatively small known or estimated trip 6059 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6060 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6061 // because with the above conditions interleaving can expose ILP and break 6062 // cross iteration dependences for reductions. 
6063 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6064 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6065 return 1; 6066 6067 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6068 // We divide by these constants so assume that we have at least one 6069 // instruction that uses at least one register. 6070 for (auto& pair : R.MaxLocalUsers) { 6071 pair.second = std::max(pair.second, 1U); 6072 } 6073 6074 // We calculate the interleave count using the following formula. 6075 // Subtract the number of loop invariants from the number of available 6076 // registers. These registers are used by all of the interleaved instances. 6077 // Next, divide the remaining registers by the number of registers that is 6078 // required by the loop, in order to estimate how many parallel instances 6079 // fit without causing spills. All of this is rounded down if necessary to be 6080 // a power of two. We want power of two interleave count to simplify any 6081 // addressing operations or alignment considerations. 6082 // We also want power of two interleave counts to ensure that the induction 6083 // variable of the vector loop wraps to zero, when tail is folded by masking; 6084 // this currently happens when OptForSize, in which case IC is set to 1 above. 6085 unsigned IC = UINT_MAX; 6086 6087 for (auto& pair : R.MaxLocalUsers) { 6088 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6089 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6090 << " registers of " 6091 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6092 if (VF.isScalar()) { 6093 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6094 TargetNumRegisters = ForceTargetNumScalarRegs; 6095 } else { 6096 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6097 TargetNumRegisters = ForceTargetNumVectorRegs; 6098 } 6099 unsigned MaxLocalUsers = pair.second; 6100 unsigned LoopInvariantRegs = 0; 6101 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6102 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6103 6104 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6105 // Don't count the induction variable as interleaved. 6106 if (EnableIndVarRegisterHeur) { 6107 TmpIC = 6108 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6109 std::max(1U, (MaxLocalUsers - 1))); 6110 } 6111 6112 IC = std::min(IC, TmpIC); 6113 } 6114 6115 // Clamp the interleave ranges to reasonable counts. 6116 unsigned MaxInterleaveCount = 6117 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6118 6119 // Check if the user has overridden the max. 6120 if (VF.isScalar()) { 6121 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6122 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6123 } else { 6124 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6125 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6126 } 6127 6128 // If trip count is known or estimated compile time constant, limit the 6129 // interleave count to be less than the trip count divided by VF, provided it 6130 // is at least 1. 6131 // 6132 // For scalable vectors we can't know if interleaving is beneficial. It may 6133 // not be beneficial for small loops if none of the lanes in the second vector 6134 // iterations is enabled. However, for larger loops, there is likely to be a 6135 // similar benefit as for fixed-width vectors. 
For now, we choose to leave 6136 // the InterleaveCount as if vscale is '1', although if some information about 6137 // the vector is known (e.g. min vector size), we can make a better decision. 6138 if (BestKnownTC) { 6139 MaxInterleaveCount = 6140 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6141 // Make sure MaxInterleaveCount is greater than 0. 6142 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6143 } 6144 6145 assert(MaxInterleaveCount > 0 && 6146 "Maximum interleave count must be greater than 0"); 6147 6148 // Clamp the calculated IC to be between the 1 and the max interleave count 6149 // that the target and trip count allows. 6150 if (IC > MaxInterleaveCount) 6151 IC = MaxInterleaveCount; 6152 else 6153 // Make sure IC is greater than 0. 6154 IC = std::max(1u, IC); 6155 6156 assert(IC > 0 && "Interleave count must be greater than 0."); 6157 6158 // If we did not calculate the cost for VF (because the user selected the VF) 6159 // then we calculate the cost of VF here. 6160 if (LoopCost == 0) { 6161 assert(expectedCost(VF).first.isValid() && "Expected a valid cost"); 6162 LoopCost = *expectedCost(VF).first.getValue(); 6163 } 6164 6165 assert(LoopCost && "Non-zero loop cost expected"); 6166 6167 // Interleave if we vectorized this loop and there is a reduction that could 6168 // benefit from interleaving. 6169 if (VF.isVector() && HasReductions) { 6170 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6171 return IC; 6172 } 6173 6174 // Note that if we've already vectorized the loop we will have done the 6175 // runtime check and so interleaving won't require further checks. 6176 bool InterleavingRequiresRuntimePointerCheck = 6177 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6178 6179 // We want to interleave small loops in order to reduce the loop overhead and 6180 // potentially expose ILP opportunities. 6181 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6182 << "LV: IC is " << IC << '\n' 6183 << "LV: VF is " << VF << '\n'); 6184 const bool AggressivelyInterleaveReductions = 6185 TTI.enableAggressiveInterleaving(HasReductions); 6186 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6187 // We assume that the cost overhead is 1 and we use the cost model 6188 // to estimate the cost of the loop and interleave until the cost of the 6189 // loop overhead is about 5% of the cost of the loop. 6190 unsigned SmallIC = 6191 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6192 6193 // Interleave until store/load ports (estimated by max interleave count) are 6194 // saturated. 6195 unsigned NumStores = Legal->getNumStores(); 6196 unsigned NumLoads = Legal->getNumLoads(); 6197 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6198 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6199 6200 // If we have a scalar reduction (vector reductions are already dealt with 6201 // by this point), we can increase the critical path length if the loop 6202 // we're interleaving is inside another loop. Limit, by default to 2, so the 6203 // critical path only gets increased by one reduction operation. 
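    // (For instance, a scalar sum reduction in an inner loop would otherwise
    // be split into IC partial sums that must be recombined; capping the
    // interleave count here keeps that recombination cost bounded.)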
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that have in-loop uses.
6277 SmallPtrSet<Instruction *, 8> Ends; 6278 // Saves the list of values that are used in the loop but are 6279 // defined outside the loop, such as arguments and constants. 6280 SmallPtrSet<Value *, 8> LoopInvariants; 6281 6282 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6283 for (Instruction &I : BB->instructionsWithoutDebug()) { 6284 IdxToInstr.push_back(&I); 6285 6286 // Save the end location of each USE. 6287 for (Value *U : I.operands()) { 6288 auto *Instr = dyn_cast<Instruction>(U); 6289 6290 // Ignore non-instruction values such as arguments, constants, etc. 6291 if (!Instr) 6292 continue; 6293 6294 // If this instruction is outside the loop then record it and continue. 6295 if (!TheLoop->contains(Instr)) { 6296 LoopInvariants.insert(Instr); 6297 continue; 6298 } 6299 6300 // Overwrite previous end points. 6301 EndPoint[Instr] = IdxToInstr.size(); 6302 Ends.insert(Instr); 6303 } 6304 } 6305 } 6306 6307 // Saves the list of intervals that end with the index in 'key'. 6308 using InstrList = SmallVector<Instruction *, 2>; 6309 DenseMap<unsigned, InstrList> TransposeEnds; 6310 6311 // Transpose the EndPoints to a list of values that end at each index. 6312 for (auto &Interval : EndPoint) 6313 TransposeEnds[Interval.second].push_back(Interval.first); 6314 6315 SmallPtrSet<Instruction *, 8> OpenIntervals; 6316 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6317 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6318 6319 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6320 6321 // A lambda that gets the register usage for the given type and VF. 6322 const auto &TTICapture = TTI; 6323 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) { 6324 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6325 return 0U; 6326 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF)); 6327 }; 6328 6329 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6330 Instruction *I = IdxToInstr[i]; 6331 6332 // Remove all of the instructions that end at this location. 6333 InstrList &List = TransposeEnds[i]; 6334 for (Instruction *ToRemove : List) 6335 OpenIntervals.erase(ToRemove); 6336 6337 // Ignore instructions that are never used within the loop. 6338 if (!Ends.count(I)) 6339 continue; 6340 6341 // Skip ignored values. 6342 if (ValuesToIgnore.count(I)) 6343 continue; 6344 6345 // For each VF find the maximum usage of registers. 6346 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6347 // Count the number of live intervals. 6348 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6349 6350 if (VFs[j].isScalar()) { 6351 for (auto Inst : OpenIntervals) { 6352 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6353 if (RegUsage.find(ClassID) == RegUsage.end()) 6354 RegUsage[ClassID] = 1; 6355 else 6356 RegUsage[ClassID] += 1; 6357 } 6358 } else { 6359 collectUniformsAndScalars(VFs[j]); 6360 for (auto Inst : OpenIntervals) { 6361 // Skip ignored values for VF > 1. 
6362 if (VecValuesToIgnore.count(Inst)) 6363 continue; 6364 if (isScalarAfterVectorization(Inst, VFs[j])) { 6365 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6366 if (RegUsage.find(ClassID) == RegUsage.end()) 6367 RegUsage[ClassID] = 1; 6368 else 6369 RegUsage[ClassID] += 1; 6370 } else { 6371 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6372 if (RegUsage.find(ClassID) == RegUsage.end()) 6373 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6374 else 6375 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6376 } 6377 } 6378 } 6379 6380 for (auto& pair : RegUsage) { 6381 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6382 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6383 else 6384 MaxUsages[j][pair.first] = pair.second; 6385 } 6386 } 6387 6388 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6389 << OpenIntervals.size() << '\n'); 6390 6391 // Add the current instruction to the list of open intervals. 6392 OpenIntervals.insert(I); 6393 } 6394 6395 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6396 SmallMapVector<unsigned, unsigned, 4> Invariant; 6397 6398 for (auto Inst : LoopInvariants) { 6399 unsigned Usage = 6400 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6401 unsigned ClassID = 6402 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6403 if (Invariant.find(ClassID) == Invariant.end()) 6404 Invariant[ClassID] = Usage; 6405 else 6406 Invariant[ClassID] += Usage; 6407 } 6408 6409 LLVM_DEBUG({ 6410 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6411 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6412 << " item\n"; 6413 for (const auto &pair : MaxUsages[i]) { 6414 dbgs() << "LV(REG): RegisterClass: " 6415 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6416 << " registers\n"; 6417 } 6418 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6419 << " item\n"; 6420 for (const auto &pair : Invariant) { 6421 dbgs() << "LV(REG): RegisterClass: " 6422 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6423 << " registers\n"; 6424 } 6425 }); 6426 6427 RU.LoopInvariantRegs = Invariant; 6428 RU.MaxLocalUsers = MaxUsages[i]; 6429 RUs[i] = RU; 6430 } 6431 6432 return RUs; 6433 } 6434 6435 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6436 // TODO: Cost model for emulated masked load/store is completely 6437 // broken. This hack guides the cost model to use an artificially 6438 // high enough value to practically disable vectorization with such 6439 // operations, except where previously deployed legality hack allowed 6440 // using very low cost values. This is to avoid regressions coming simply 6441 // from moving "masked load/store" check from legality to cost model. 6442 // Masked Load/Gather emulation was previously never allowed. 6443 // Limited number of Masked Store/Scatter emulation was allowed. 6444 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 6445 return isa<LoadInst>(I) || 6446 (isa<StoreInst>(I) && 6447 NumPredStores > NumberOfStoresToPredicate); 6448 } 6449 6450 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6451 // If we aren't vectorizing the loop, or if we've already collected the 6452 // instructions to scalarize, there's nothing to do. Collection may already 6453 // have occurred if we have a user-selected VF and are now computing the 6454 // expected cost for interleaving. 
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's not
  // profitable to scalarize any instructions, the presence of VF in the map
  // will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine whether it would be better not to if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply the discount logic if the hacked cost is needed for
        // emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for. This behavior can be changed by allowing getScalarValue to clone
    // the lane-zero values for uniforms rather than asserting.
6524 for (Use &U : I->operands()) 6525 if (auto *J = dyn_cast<Instruction>(U.get())) 6526 if (isUniformAfterVectorization(J, VF)) 6527 return false; 6528 6529 // Otherwise, we can scalarize the instruction. 6530 return true; 6531 }; 6532 6533 // Compute the expected cost discount from scalarizing the entire expression 6534 // feeding the predicated instruction. We currently only consider expressions 6535 // that are single-use instruction chains. 6536 Worklist.push_back(PredInst); 6537 while (!Worklist.empty()) { 6538 Instruction *I = Worklist.pop_back_val(); 6539 6540 // If we've already analyzed the instruction, there's nothing to do. 6541 if (ScalarCosts.find(I) != ScalarCosts.end()) 6542 continue; 6543 6544 // Compute the cost of the vector instruction. Note that this cost already 6545 // includes the scalarization overhead of the predicated instruction. 6546 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6547 6548 // Compute the cost of the scalarized instruction. This cost is the cost of 6549 // the instruction as if it wasn't if-converted and instead remained in the 6550 // predicated block. We will scale this cost by block probability after 6551 // computing the scalarization overhead. 6552 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6553 InstructionCost ScalarCost = 6554 VF.getKnownMinValue() * 6555 getInstructionCost(I, ElementCount::getFixed(1)).first; 6556 6557 // Compute the scalarization overhead of needed insertelement instructions 6558 // and phi nodes. 6559 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6560 ScalarCost += TTI.getScalarizationOverhead( 6561 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6562 APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); 6563 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6564 ScalarCost += 6565 VF.getKnownMinValue() * 6566 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6567 } 6568 6569 // Compute the scalarization overhead of needed extractelement 6570 // instructions. For each of the instruction's operands, if the operand can 6571 // be scalarized, add it to the worklist; otherwise, account for the 6572 // overhead. 6573 for (Use &U : I->operands()) 6574 if (auto *J = dyn_cast<Instruction>(U.get())) { 6575 assert(VectorType::isValidElementType(J->getType()) && 6576 "Instruction has non-scalar type"); 6577 if (canBeScalarized(J)) 6578 Worklist.push_back(J); 6579 else if (needsExtract(J, VF)) { 6580 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6581 ScalarCost += TTI.getScalarizationOverhead( 6582 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6583 APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); 6584 } 6585 } 6586 6587 // Scale the total scalar cost by block probability. 6588 ScalarCost /= getReciprocalPredBlockProb(); 6589 6590 // Compute the discount. A non-negative discount means the vector version 6591 // of the instruction costs more, and scalarizing would be beneficial. 6592 Discount += VectorCost - ScalarCost; 6593 ScalarCosts[I] = ScalarCost; 6594 } 6595 6596 return *Discount.getValue(); 6597 } 6598 6599 LoopVectorizationCostModel::VectorizationCostTy 6600 LoopVectorizationCostModel::expectedCost(ElementCount VF) { 6601 VectorizationCostTy Cost; 6602 6603 // For each block. 6604 for (BasicBlock *BB : TheLoop->blocks()) { 6605 VectorizationCostTy BlockCost; 6606 6607 // For each instruction in the old loop. 6608 for (Instruction &I : BB->instructionsWithoutDebug()) { 6609 // Skip ignored values. 
6610 if (ValuesToIgnore.count(&I) || 6611 (VF.isVector() && VecValuesToIgnore.count(&I))) 6612 continue; 6613 6614 VectorizationCostTy C = getInstructionCost(&I, VF); 6615 6616 // Check if we should override the cost. 6617 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6618 C.first = InstructionCost(ForceTargetInstructionCost); 6619 6620 BlockCost.first += C.first; 6621 BlockCost.second |= C.second; 6622 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6623 << " for VF " << VF << " For instruction: " << I 6624 << '\n'); 6625 } 6626 6627 // If we are vectorizing a predicated block, it will have been 6628 // if-converted. This means that the block's instructions (aside from 6629 // stores and instructions that may divide by zero) will now be 6630 // unconditionally executed. For the scalar case, we may not always execute 6631 // the predicated block, if it is an if-else block. Thus, scale the block's 6632 // cost by the probability of executing it. blockNeedsPredication from 6633 // Legal is used so as to not include all blocks in tail folded loops. 6634 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6635 BlockCost.first /= getReciprocalPredBlockProb(); 6636 6637 Cost.first += BlockCost.first; 6638 Cost.second |= BlockCost.second; 6639 } 6640 6641 return Cost; 6642 } 6643 6644 /// Gets Address Access SCEV after verifying that the access pattern 6645 /// is loop invariant except the induction variable dependence. 6646 /// 6647 /// This SCEV can be sent to the Target in order to estimate the address 6648 /// calculation cost. 6649 static const SCEV *getAddressAccessSCEV( 6650 Value *Ptr, 6651 LoopVectorizationLegality *Legal, 6652 PredicatedScalarEvolution &PSE, 6653 const Loop *TheLoop) { 6654 6655 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6656 if (!Gep) 6657 return nullptr; 6658 6659 // We are looking for a gep with all loop invariant indices except for one 6660 // which should be an induction variable. 6661 auto SE = PSE.getSE(); 6662 unsigned NumOperands = Gep->getNumOperands(); 6663 for (unsigned i = 1; i < NumOperands; ++i) { 6664 Value *Opd = Gep->getOperand(i); 6665 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6666 !Legal->isInductionVariable(Opd)) 6667 return nullptr; 6668 } 6669 6670 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6671 return PSE.getSCEV(Ptr); 6672 } 6673 6674 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6675 return Legal->hasStride(I->getOperand(0)) || 6676 Legal->hasStride(I->getOperand(1)); 6677 } 6678 6679 InstructionCost 6680 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6681 ElementCount VF) { 6682 assert(VF.isVector() && 6683 "Scalarization cost of instruction implies vectorization."); 6684 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6685 Type *ValTy = getMemInstValueType(I); 6686 auto SE = PSE.getSE(); 6687 6688 unsigned AS = getLoadStoreAddressSpace(I); 6689 Value *Ptr = getLoadStorePointerOperand(I); 6690 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6691 6692 // Figure out whether the access is strided and get the stride value 6693 // if it's known in compile time 6694 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6695 6696 // Get the cost of the scalar memory instruction and address computation. 
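  // The scalarized form issues VF scalar address computations and VF scalar
  // loads/stores; insert/extract overhead is added below, and predicated
  // accesses are further scaled down by the block execution probability.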
6697 InstructionCost Cost = 6698 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6699 6700 // Don't pass *I here, since it is scalar but will actually be part of a 6701 // vectorized loop where the user of it is a vectorized instruction. 6702 const Align Alignment = getLoadStoreAlignment(I); 6703 Cost += VF.getKnownMinValue() * 6704 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6705 AS, TTI::TCK_RecipThroughput); 6706 6707 // Get the overhead of the extractelement and insertelement instructions 6708 // we might create due to scalarization. 6709 Cost += getScalarizationOverhead(I, VF); 6710 6711 // If we have a predicated store, it may not be executed for each vector 6712 // lane. Scale the cost by the probability of executing the predicated 6713 // block. 6714 if (isPredicatedInst(I)) { 6715 Cost /= getReciprocalPredBlockProb(); 6716 6717 if (useEmulatedMaskMemRefHack(I)) 6718 // Artificially setting to a high enough value to practically disable 6719 // vectorization with such operations. 6720 Cost = 3000000; 6721 } 6722 6723 return Cost; 6724 } 6725 6726 InstructionCost 6727 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6728 ElementCount VF) { 6729 Type *ValTy = getMemInstValueType(I); 6730 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6731 Value *Ptr = getLoadStorePointerOperand(I); 6732 unsigned AS = getLoadStoreAddressSpace(I); 6733 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6734 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6735 6736 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6737 "Stride should be 1 or -1 for consecutive memory access"); 6738 const Align Alignment = getLoadStoreAlignment(I); 6739 InstructionCost Cost = 0; 6740 if (Legal->isMaskRequired(I)) 6741 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6742 CostKind); 6743 else 6744 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6745 CostKind, I); 6746 6747 bool Reverse = ConsecutiveStride < 0; 6748 if (Reverse) 6749 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6750 return Cost; 6751 } 6752 6753 InstructionCost 6754 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6755 ElementCount VF) { 6756 assert(Legal->isUniformMemOp(*I)); 6757 6758 Type *ValTy = getMemInstValueType(I); 6759 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6760 const Align Alignment = getLoadStoreAlignment(I); 6761 unsigned AS = getLoadStoreAddressSpace(I); 6762 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6763 if (isa<LoadInst>(I)) { 6764 return TTI.getAddressComputationCost(ValTy) + 6765 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6766 CostKind) + 6767 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6768 } 6769 StoreInst *SI = cast<StoreInst>(I); 6770 6771 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6772 return TTI.getAddressComputationCost(ValTy) + 6773 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6774 CostKind) + 6775 (isLoopInvariantStoreValue 6776 ? 
0
             : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                      VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  Type *ValTy = getMemInstValueType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  }
  return Cost;
}

InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit if there are no in-loop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return InstructionCost::getInvalid();
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for one of the following patterns, and for the minimal
  // acceptable cost of it:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we find the
  // pattern of mul/ext and test the cost of the entire pattern vs the cost of
  // the components. If the reduction cost is lower, then we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost, specifying that the original cost
  // method should be used.
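  // (Illustrative scalar-loop shape for the reduce(mul(ext(A), ext(B))) case;
  // the value names are invented for the example:
  //   %a.ext    = sext i8 %a to i32
  //   %b.ext    = sext i8 %b to i32
  //   %mul      = mul i32 %a.ext, %b.ext
  //   %sum.next = add i32 %sum.phi, %mul   ; in-loop reduction add
  // Here RetI ends up on the add, and the whole chain is priced as one
  // extended multiply-accumulate reduction when that is cheaper.)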
6854 Instruction *RetI = I; 6855 if ((RetI->getOpcode() == Instruction::SExt || 6856 RetI->getOpcode() == Instruction::ZExt)) { 6857 if (!RetI->hasOneUser()) 6858 return InstructionCost::getInvalid(); 6859 RetI = RetI->user_back(); 6860 } 6861 if (RetI->getOpcode() == Instruction::Mul && 6862 RetI->user_back()->getOpcode() == Instruction::Add) { 6863 if (!RetI->hasOneUser()) 6864 return InstructionCost::getInvalid(); 6865 RetI = RetI->user_back(); 6866 } 6867 6868 // Test if the found instruction is a reduction, and if not return an invalid 6869 // cost specifying the parent to use the original cost modelling. 6870 if (!InLoopReductionImmediateChains.count(RetI)) 6871 return InstructionCost::getInvalid(); 6872 6873 // Find the reduction this chain is a part of and calculate the basic cost of 6874 // the reduction on its own. 6875 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6876 Instruction *ReductionPhi = LastChain; 6877 while (!isa<PHINode>(ReductionPhi)) 6878 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6879 6880 RecurrenceDescriptor RdxDesc = 6881 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 6882 unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), 6883 VectorTy, false, CostKind); 6884 6885 // Get the operand that was not the reduction chain and match it to one of the 6886 // patterns, returning the better cost if it is found. 6887 Instruction *RedOp = RetI->getOperand(1) == LastChain 6888 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6889 : dyn_cast<Instruction>(RetI->getOperand(1)); 6890 6891 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6892 6893 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 6894 !TheLoop->isLoopInvariant(RedOp)) { 6895 bool IsUnsigned = isa<ZExtInst>(RedOp); 6896 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6897 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6898 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6899 CostKind); 6900 6901 unsigned ExtCost = 6902 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6903 TTI::CastContextHint::None, CostKind, RedOp); 6904 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6905 return I == RetI ? *RedCost.getValue() : 0; 6906 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 6907 Instruction *Mul = RedOp; 6908 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 6909 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 6910 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 6911 Op0->getOpcode() == Op1->getOpcode() && 6912 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6913 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6914 bool IsUnsigned = isa<ZExtInst>(Op0); 6915 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6916 // reduce(mul(ext, ext)) 6917 unsigned ExtCost = 6918 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 6919 TTI::CastContextHint::None, CostKind, Op0); 6920 unsigned MulCost = 6921 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6922 6923 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6924 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6925 CostKind); 6926 6927 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 6928 return I == RetI ? 
*RedCost.getValue() : 0; 6929 } else { 6930 unsigned MulCost = 6931 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6932 6933 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6934 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6935 CostKind); 6936 6937 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6938 return I == RetI ? *RedCost.getValue() : 0; 6939 } 6940 } 6941 6942 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 6943 } 6944 6945 InstructionCost 6946 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6947 ElementCount VF) { 6948 // Calculate scalar cost only. Vectorization cost should be ready at this 6949 // moment. 6950 if (VF.isScalar()) { 6951 Type *ValTy = getMemInstValueType(I); 6952 const Align Alignment = getLoadStoreAlignment(I); 6953 unsigned AS = getLoadStoreAddressSpace(I); 6954 6955 return TTI.getAddressComputationCost(ValTy) + 6956 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6957 TTI::TCK_RecipThroughput, I); 6958 } 6959 return getWideningCost(I, VF); 6960 } 6961 6962 LoopVectorizationCostModel::VectorizationCostTy 6963 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6964 ElementCount VF) { 6965 // If we know that this instruction will remain uniform, check the cost of 6966 // the scalar version. 6967 if (isUniformAfterVectorization(I, VF)) 6968 VF = ElementCount::getFixed(1); 6969 6970 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6971 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6972 6973 // Forced scalars do not have any scalarization overhead. 6974 auto ForcedScalar = ForcedScalars.find(VF); 6975 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6976 auto InstSet = ForcedScalar->second; 6977 if (InstSet.count(I)) 6978 return VectorizationCostTy( 6979 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6980 VF.getKnownMinValue()), 6981 false); 6982 } 6983 6984 Type *VectorTy; 6985 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6986 6987 bool TypeNotScalarized = 6988 VF.isVector() && VectorTy->isVectorTy() && 6989 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 6990 return VectorizationCostTy(C, TypeNotScalarized); 6991 } 6992 6993 InstructionCost 6994 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6995 ElementCount VF) { 6996 6997 assert(!VF.isScalable() && 6998 "cannot compute scalarization overhead for scalable vectorization"); 6999 if (VF.isScalar()) 7000 return 0; 7001 7002 InstructionCost Cost = 0; 7003 Type *RetTy = ToVectorTy(I->getType(), VF); 7004 if (!RetTy->isVoidTy() && 7005 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7006 Cost += TTI.getScalarizationOverhead( 7007 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7008 true, false); 7009 7010 // Some targets keep addresses scalar. 7011 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7012 return Cost; 7013 7014 // Some targets support efficient element stores. 7015 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7016 return Cost; 7017 7018 // Collect operands to consider. 7019 CallInst *CI = dyn_cast<CallInst>(I); 7020 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7021 7022 // Skip operands that do not require extraction/scalarization and do not incur 7023 // any overhead. 
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        InstructionCost Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride =
            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = std::numeric_limits<int>::max();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : std::numeric_limits<int>::max();

      InstructionCost ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the better solution for the current VF, write down this
      // decision and use it during vectorization.
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
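      // For example, a factor-2 load group {A[2*i], A[2*i+1]} at VF=4 is
      // modelled roughly as one wide 8-element load plus the shuffles that
      // de-interleave its members, whereas the scalarization cost is the
      // per-lane cost scaled by NumAccesses; whichever of the three
      // alternatives is cheapest becomes the single decision recorded for
      // every member of the group below.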
7111 if (auto Group = getInterleavedAccessGroup(&I)) 7112 setWideningDecision(Group, VF, Decision, Cost); 7113 else 7114 setWideningDecision(&I, VF, Decision, Cost); 7115 } 7116 } 7117 7118 // Make sure that any load of address and any other address computation 7119 // remains scalar unless there is gather/scatter support. This avoids 7120 // inevitable extracts into address registers, and also has the benefit of 7121 // activating LSR more, since that pass can't optimize vectorized 7122 // addresses. 7123 if (TTI.prefersVectorizedAddressing()) 7124 return; 7125 7126 // Start with all scalar pointer uses. 7127 SmallPtrSet<Instruction *, 8> AddrDefs; 7128 for (BasicBlock *BB : TheLoop->blocks()) 7129 for (Instruction &I : *BB) { 7130 Instruction *PtrDef = 7131 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7132 if (PtrDef && TheLoop->contains(PtrDef) && 7133 getWideningDecision(&I, VF) != CM_GatherScatter) 7134 AddrDefs.insert(PtrDef); 7135 } 7136 7137 // Add all instructions used to generate the addresses. 7138 SmallVector<Instruction *, 4> Worklist; 7139 append_range(Worklist, AddrDefs); 7140 while (!Worklist.empty()) { 7141 Instruction *I = Worklist.pop_back_val(); 7142 for (auto &Op : I->operands()) 7143 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7144 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7145 AddrDefs.insert(InstOp).second) 7146 Worklist.push_back(InstOp); 7147 } 7148 7149 for (auto *I : AddrDefs) { 7150 if (isa<LoadInst>(I)) { 7151 // Setting the desired widening decision should ideally be handled in 7152 // by cost functions, but since this involves the task of finding out 7153 // if the loaded register is involved in an address computation, it is 7154 // instead changed here when we know this is the case. 7155 InstWidening Decision = getWideningDecision(I, VF); 7156 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7157 // Scalarize a widened load of address. 7158 setWideningDecision( 7159 I, VF, CM_Scalarize, 7160 (VF.getKnownMinValue() * 7161 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7162 else if (auto Group = getInterleavedAccessGroup(I)) { 7163 // Scalarize an interleave group of address loads. 7164 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7165 if (Instruction *Member = Group->getMember(I)) 7166 setWideningDecision( 7167 Member, VF, CM_Scalarize, 7168 (VF.getKnownMinValue() * 7169 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7170 } 7171 } 7172 } else 7173 // Make sure I gets scalarized and a cost estimate without 7174 // scalarization overhead. 7175 ForcedScalars[VF].insert(I); 7176 } 7177 } 7178 7179 InstructionCost 7180 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7181 Type *&VectorTy) { 7182 Type *RetTy = I->getType(); 7183 if (canTruncateToMinimalBitwidth(I, VF)) 7184 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7185 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7186 auto SE = PSE.getSE(); 7187 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7188 7189 // TODO: We need to estimate the cost of intrinsic calls. 7190 switch (I->getOpcode()) { 7191 case Instruction::GetElementPtr: 7192 // We mark this instruction as zero-cost because the cost of GEPs in 7193 // vectorized code depends on whether the corresponding memory instruction 7194 // is scalarized or not. Therefore, we handle GEPs with the memory 7195 // instruction cost. 
7196 return 0; 7197 case Instruction::Br: { 7198 // In cases of scalarized and predicated instructions, there will be VF 7199 // predicated blocks in the vectorized loop. Each branch around these 7200 // blocks requires also an extract of its vector compare i1 element. 7201 bool ScalarPredicatedBB = false; 7202 BranchInst *BI = cast<BranchInst>(I); 7203 if (VF.isVector() && BI->isConditional() && 7204 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7205 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7206 ScalarPredicatedBB = true; 7207 7208 if (ScalarPredicatedBB) { 7209 // Return cost for branches around scalarized and predicated blocks. 7210 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7211 auto *Vec_i1Ty = 7212 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7213 return (TTI.getScalarizationOverhead( 7214 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7215 false, true) + 7216 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7217 VF.getKnownMinValue())); 7218 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7219 // The back-edge branch will remain, as will all scalar branches. 7220 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7221 else 7222 // This branch will be eliminated by if-conversion. 7223 return 0; 7224 // Note: We currently assume zero cost for an unconditional branch inside 7225 // a predicated block since it will become a fall-through, although we 7226 // may decide in the future to call TTI for all branches. 7227 } 7228 case Instruction::PHI: { 7229 auto *Phi = cast<PHINode>(I); 7230 7231 // First-order recurrences are replaced by vector shuffles inside the loop. 7232 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7233 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7234 return TTI.getShuffleCost( 7235 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7236 VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7237 7238 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7239 // converted into select instructions. We require N - 1 selects per phi 7240 // node, where N is the number of incoming values. 7241 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7242 return (Phi->getNumIncomingValues() - 1) * 7243 TTI.getCmpSelInstrCost( 7244 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7245 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7246 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7247 7248 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7249 } 7250 case Instruction::UDiv: 7251 case Instruction::SDiv: 7252 case Instruction::URem: 7253 case Instruction::SRem: 7254 // If we have a predicated instruction, it may not be executed for each 7255 // vector lane. Get the scalarization cost and scale this amount by the 7256 // probability of executing the predicated block. If the instruction is not 7257 // predicated, we fall through to the next case. 7258 if (VF.isVector() && isScalarWithPredication(I)) { 7259 InstructionCost Cost = 0; 7260 7261 // These instructions have a non-void type, so account for the phi nodes 7262 // that we will create. This cost is likely to be zero. The phi node 7263 // cost, if any, should be scaled by the block probability because it 7264 // models a copy at the end of each predicated block. 7265 Cost += VF.getKnownMinValue() * 7266 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7267 7268 // The cost of the non-predicated instruction. 
7269 Cost += VF.getKnownMinValue() * 7270 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7271 7272 // The cost of insertelement and extractelement instructions needed for 7273 // scalarization. 7274 Cost += getScalarizationOverhead(I, VF); 7275 7276 // Scale the cost by the probability of executing the predicated blocks. 7277 // This assumes the predicated block for each vector lane is equally 7278 // likely. 7279 return Cost / getReciprocalPredBlockProb(); 7280 } 7281 LLVM_FALLTHROUGH; 7282 case Instruction::Add: 7283 case Instruction::FAdd: 7284 case Instruction::Sub: 7285 case Instruction::FSub: 7286 case Instruction::Mul: 7287 case Instruction::FMul: 7288 case Instruction::FDiv: 7289 case Instruction::FRem: 7290 case Instruction::Shl: 7291 case Instruction::LShr: 7292 case Instruction::AShr: 7293 case Instruction::And: 7294 case Instruction::Or: 7295 case Instruction::Xor: { 7296 // Since we will replace the stride by 1 the multiplication should go away. 7297 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7298 return 0; 7299 7300 // Detect reduction patterns 7301 InstructionCost RedCost; 7302 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7303 .isValid()) 7304 return RedCost; 7305 7306 // Certain instructions can be cheaper to vectorize if they have a constant 7307 // second vector operand. One example of this are shifts on x86. 7308 Value *Op2 = I->getOperand(1); 7309 TargetTransformInfo::OperandValueProperties Op2VP; 7310 TargetTransformInfo::OperandValueKind Op2VK = 7311 TTI.getOperandInfo(Op2, Op2VP); 7312 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7313 Op2VK = TargetTransformInfo::OK_UniformValue; 7314 7315 SmallVector<const Value *, 4> Operands(I->operand_values()); 7316 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7317 return N * TTI.getArithmeticInstrCost( 7318 I->getOpcode(), VectorTy, CostKind, 7319 TargetTransformInfo::OK_AnyValue, 7320 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7321 } 7322 case Instruction::FNeg: { 7323 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7324 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF.getKnownMinValue() : 1; 7325 return N * TTI.getArithmeticInstrCost( 7326 I->getOpcode(), VectorTy, CostKind, 7327 TargetTransformInfo::OK_AnyValue, 7328 TargetTransformInfo::OK_AnyValue, 7329 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 7330 I->getOperand(0), I); 7331 } 7332 case Instruction::Select: { 7333 SelectInst *SI = cast<SelectInst>(I); 7334 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7335 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7336 Type *CondTy = SI->getCondition()->getType(); 7337 if (!ScalarCond) 7338 CondTy = VectorType::get(CondTy, VF); 7339 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7340 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7341 } 7342 case Instruction::ICmp: 7343 case Instruction::FCmp: { 7344 Type *ValTy = I->getOperand(0)->getType(); 7345 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7346 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7347 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7348 VectorTy = ToVectorTy(ValTy, VF); 7349 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7350 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7351 } 7352 case Instruction::Store: 7353 case Instruction::Load: { 7354 ElementCount Width = VF; 7355 if (Width.isVector()) { 7356 InstWidening Decision = getWideningDecision(I, Width); 7357 assert(Decision != CM_Unknown && 7358 "CM decision should be taken at this point"); 7359 if (Decision == CM_Scalarize) 7360 Width = ElementCount::getFixed(1); 7361 } 7362 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7363 return getMemoryInstructionCost(I, VF); 7364 } 7365 case Instruction::ZExt: 7366 case Instruction::SExt: 7367 case Instruction::FPToUI: 7368 case Instruction::FPToSI: 7369 case Instruction::FPExt: 7370 case Instruction::PtrToInt: 7371 case Instruction::IntToPtr: 7372 case Instruction::SIToFP: 7373 case Instruction::UIToFP: 7374 case Instruction::Trunc: 7375 case Instruction::FPTrunc: 7376 case Instruction::BitCast: { 7377 // Computes the CastContextHint from a Load/Store instruction. 7378 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7379 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7380 "Expected a load or a store!"); 7381 7382 if (VF.isScalar() || !TheLoop->contains(I)) 7383 return TTI::CastContextHint::Normal; 7384 7385 switch (getWideningDecision(I, VF)) { 7386 case LoopVectorizationCostModel::CM_GatherScatter: 7387 return TTI::CastContextHint::GatherScatter; 7388 case LoopVectorizationCostModel::CM_Interleave: 7389 return TTI::CastContextHint::Interleave; 7390 case LoopVectorizationCostModel::CM_Scalarize: 7391 case LoopVectorizationCostModel::CM_Widen: 7392 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7393 : TTI::CastContextHint::Normal; 7394 case LoopVectorizationCostModel::CM_Widen_Reverse: 7395 return TTI::CastContextHint::Reversed; 7396 case LoopVectorizationCostModel::CM_Unknown: 7397 llvm_unreachable("Instr did not go through cost modelling?"); 7398 } 7399 7400 llvm_unreachable("Unhandled case!"); 7401 }; 7402 7403 unsigned Opcode = I->getOpcode(); 7404 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7405 // For Trunc, the context is the only user, which must be a StoreInst. 
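    // e.g. a 'trunc i32 %x to i16' whose single user is a 'store i16'.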
7406 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7407 if (I->hasOneUse()) 7408 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7409 CCH = ComputeCCH(Store); 7410 } 7411 // For Z/Sext, the context is the operand, which must be a LoadInst. 7412 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7413 Opcode == Instruction::FPExt) { 7414 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7415 CCH = ComputeCCH(Load); 7416 } 7417 7418 // We optimize the truncation of induction variables having constant 7419 // integer steps. The cost of these truncations is the same as the scalar 7420 // operation. 7421 if (isOptimizableIVTruncate(I, VF)) { 7422 auto *Trunc = cast<TruncInst>(I); 7423 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7424 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7425 } 7426 7427 // Detect reduction patterns 7428 InstructionCost RedCost; 7429 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7430 .isValid()) 7431 return RedCost; 7432 7433 Type *SrcScalarTy = I->getOperand(0)->getType(); 7434 Type *SrcVecTy = 7435 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7436 if (canTruncateToMinimalBitwidth(I, VF)) { 7437 // This cast is going to be shrunk. This may remove the cast or it might 7438 // turn it into slightly different cast. For example, if MinBW == 16, 7439 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7440 // 7441 // Calculate the modified src and dest types. 7442 Type *MinVecTy = VectorTy; 7443 if (Opcode == Instruction::Trunc) { 7444 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7445 VectorTy = 7446 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7447 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7448 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7449 VectorTy = 7450 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7451 } 7452 } 7453 7454 assert(!VF.isScalable() && "VF is assumed to be non scalable"); 7455 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7456 return N * 7457 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7458 } 7459 case Instruction::Call: { 7460 bool NeedToScalarize; 7461 CallInst *CI = cast<CallInst>(I); 7462 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7463 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7464 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7465 return std::min(CallCost, IntrinsicCost); 7466 } 7467 return CallCost; 7468 } 7469 case Instruction::ExtractValue: 7470 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7471 default: 7472 // The cost of executing VF copies of the scalar instruction. This opcode 7473 // is unknown. Assume that it is the same as 'mul'. 7474 return VF.getKnownMinValue() * TTI.getArithmeticInstrCost( 7475 Instruction::Mul, VectorTy, CostKind) + 7476 getScalarizationOverhead(I, VF); 7477 } // end of switch. 
7478 } 7479 7480 char LoopVectorize::ID = 0; 7481 7482 static const char lv_name[] = "Loop Vectorization"; 7483 7484 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7485 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7486 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7487 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7488 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7489 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7490 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7491 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7492 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7493 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7494 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7495 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7496 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7497 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7498 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7499 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7500 7501 namespace llvm { 7502 7503 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7504 7505 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7506 bool VectorizeOnlyWhenForced) { 7507 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7508 } 7509 7510 } // end namespace llvm 7511 7512 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7513 // Check if the pointer operand of a load or store instruction is 7514 // consecutive. 7515 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7516 return Legal->isConsecutivePtr(Ptr); 7517 return false; 7518 } 7519 7520 void LoopVectorizationCostModel::collectValuesToIgnore() { 7521 // Ignore ephemeral values. 7522 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7523 7524 // Ignore type-promoting instructions we identified during reduction 7525 // detection. 7526 for (auto &Reduction : Legal->getReductionVars()) { 7527 RecurrenceDescriptor &RedDes = Reduction.second; 7528 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7529 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7530 } 7531 // Ignore type-casting instructions we identified during induction 7532 // detection. 7533 for (auto &Induction : Legal->getInductionVars()) { 7534 InductionDescriptor &IndDes = Induction.second; 7535 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7536 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7537 } 7538 } 7539 7540 void LoopVectorizationCostModel::collectInLoopReductions() { 7541 for (auto &Reduction : Legal->getReductionVars()) { 7542 PHINode *Phi = Reduction.first; 7543 RecurrenceDescriptor &RdxDesc = Reduction.second; 7544 7545 // We don't collect reductions that are type promoted (yet). 7546 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7547 continue; 7548 7549 // If the target would prefer this reduction to happen "in-loop", then we 7550 // want to record it as such. 7551 unsigned Opcode = RdxDesc.getOpcode(); 7552 if (!PreferInLoopReductions && 7553 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7554 TargetTransformInfo::ReductionFlags())) 7555 continue; 7556 7557 // Check that we can correctly put the reductions into the loop, by 7558 // finding the chain of operations that leads from the phi to the loop 7559 // exit value. 
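    // For example, for a simple integer add reduction
    //   %sum      = phi i32 [ 0, %preheader ], [ %sum.next, %loop ]
    //   %sum.next = add i32 %sum, %val
    // the chain is just {%sum.next}; if no such single-use chain from the phi
    // to the exit value can be found, the list stays empty and the reduction
    // is kept out-of-loop.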
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: They may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(
          determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
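  // When the loop header itself needs predication, the tail is being folded
  // by masking, and executing an interleave group under a mask requires
  // target support for masked interleaved accesses; without that support the
  // groups, and every cost-model decision already derived from them, must be
  // dropped.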
  if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all decisions
      // based on them, which includes widening decisions and uniform and scalar
      // values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF.isNonZero() && "MaxVF is zero.");

  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
  if (!UserVF.isZero() &&
      (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
    ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
    LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
                      << " VF " << VF << ".\n");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(VF);
    CM.collectInLoopReductions();
    buildVPlansWithVPRecipes(VF, VF);
    LLVM_DEBUG(printPlans(dbgs()));
    return {{VF, 0}};
  }

  assert(!MaxVF.isScalable() &&
         "Scalable vectors not yet supported beyond this point");

  for (ElementCount VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();

  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
  LLVM_DEBUG(printPlans(dbgs()));
  if (MaxVF.isScalar())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}

void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) { return !Plan->hasVF(VF); });
  assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7718 VPCallbackILV CallbackILV(ILV); 7719 7720 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 7721 7722 VPTransformState State{*BestVF, BestUF, LI, 7723 DT, ILV.Builder, ILV.VectorLoopValueMap, 7724 &ILV, CallbackILV}; 7725 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7726 State.TripCount = ILV.getOrCreateTripCount(nullptr); 7727 State.CanonicalIV = ILV.Induction; 7728 7729 ILV.printDebugTracesAtStart(); 7730 7731 //===------------------------------------------------===// 7732 // 7733 // Notice: any optimization or new instruction that go 7734 // into the code below should also be implemented in 7735 // the cost-model. 7736 // 7737 //===------------------------------------------------===// 7738 7739 // 2. Copy and widen instructions from the old loop into the new loop. 7740 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7741 VPlans.front()->execute(&State); 7742 7743 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7744 // predication, updating analyses. 7745 ILV.fixVectorizedLoop(); 7746 7747 ILV.printDebugTracesAtEnd(); 7748 } 7749 7750 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7751 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7752 7753 // We create new control-flow for the vectorized loop, so the original exit 7754 // conditions will be dead after vectorization if it's only used by the 7755 // terminator 7756 SmallVector<BasicBlock*> ExitingBlocks; 7757 OrigLoop->getExitingBlocks(ExitingBlocks); 7758 for (auto *BB : ExitingBlocks) { 7759 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7760 if (!Cmp || !Cmp->hasOneUse()) 7761 continue; 7762 7763 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7764 if (!DeadInstructions.insert(Cmp).second) 7765 continue; 7766 7767 // The operands of the icmp is often a dead trunc, used by IndUpdate. 7768 // TODO: can recurse through operands in general 7769 for (Value *Op : Cmp->operands()) { 7770 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7771 DeadInstructions.insert(cast<Instruction>(Op)); 7772 } 7773 } 7774 7775 // We create new "steps" for induction variable updates to which the original 7776 // induction variables map. An original update instruction will be dead if 7777 // all its users except the induction variable are dead. 7778 auto *Latch = OrigLoop->getLoopLatch(); 7779 for (auto &Induction : Legal->getInductionVars()) { 7780 PHINode *Ind = Induction.first; 7781 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7782 7783 // If the tail is to be folded by masking, the primary induction variable, 7784 // if exists, isn't dead: it will be used for masking. Don't kill it. 7785 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7786 continue; 7787 7788 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7789 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7790 })) 7791 DeadInstructions.insert(IndUpdate); 7792 7793 // We record as "Dead" also the type-casting instructions we had identified 7794 // during induction analysis. We don't need any handling for them in the 7795 // vectorized loop because we have proven that, under a proper runtime 7796 // test guarding the vectorized loop, the value of the phi, and the casted 7797 // value of the phi, are the same. The last instruction in this casting chain 7798 // will get its scalar/vector/widened def from the scalar/vector/widened def 7799 // of the respective phi node. 
    // Any other casts in the induction def-use chain
    // have no other uses outside the phi update chain, and will be ignored.
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    DeadInstructions.insert(Casts.begin(), Casts.end());
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("");

  // Generate the code to check the minimum iteration count of the vector
  // epilogue (see below).
  EPI.EpilogueIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
  EPI.EpilogueIterationCountCheck->setName("iter.check");

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
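  // emitSCEVChecks (and emitMemRuntimeChecks below) split off a fresh
  // LoopVectorPreHeader whenever they actually emit a check, so comparing the
  // preheader saved beforehand against the current one tells us whether a
  // check block was created and has to be remembered for the epilogue pass.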
  BasicBlock *SavedPreHeader = LoopVectorPreHeader;
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // If a safety check was generated, save it.
  if (SavedPreHeader != LoopVectorPreHeader)
    EPI.SCEVSafetyCheck = SavedPreHeader;

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  SavedPreHeader = LoopVectorPreHeader;
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // If a safety check was generated, save/overwrite it.
  if (SavedPreHeader != LoopVectorPreHeader)
    EPI.MemSafetyCheck = SavedPreHeader;

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length for
  // the main loop is compensated for by the gain from vectorizing the larger
  // trip count. Note: the branch will get updated later on when we vectorize
  // the epilogue.
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Skip induction resume value creation here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the vplan in the second pass still contains the inductions from the
  // original loop.

  return completeLoopSkeleton(Lp, OrigLoopID);
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  unsigned VFactor =
      ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF of the
  // main vector loop.
  auto P =
      Cost->requiresScalarEpilogue() ?
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7958 7959 Value *CheckMinIters = Builder.CreateICmp( 7960 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 7961 "min.iters.check"); 7962 7963 if (!ForEpilogue) 7964 TCCheckBlock->setName("vector.main.loop.iter.check"); 7965 7966 // Create new preheader for vector loop. 7967 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7968 DT, LI, nullptr, "vector.ph"); 7969 7970 if (ForEpilogue) { 7971 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7972 DT->getNode(Bypass)->getIDom()) && 7973 "TC check is expected to dominate Bypass"); 7974 7975 // Update dominator for Bypass & LoopExit. 7976 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7977 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7978 7979 LoopBypassBlocks.push_back(TCCheckBlock); 7980 7981 // Save the trip count so we don't have to regenerate it in the 7982 // vec.epilog.iter.check. This is safe to do because the trip count 7983 // generated here dominates the vector epilog iter check. 7984 EPI.TripCount = Count; 7985 } 7986 7987 ReplaceInstWithInst( 7988 TCCheckBlock->getTerminator(), 7989 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7990 7991 return TCCheckBlock; 7992 } 7993 7994 //===--------------------------------------------------------------------===// 7995 // EpilogueVectorizerEpilogueLoop 7996 //===--------------------------------------------------------------------===// 7997 7998 /// This function is partially responsible for generating the control flow 7999 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8000 BasicBlock * 8001 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8002 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8003 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8004 8005 // Now, compare the remaining count and if there aren't enough iterations to 8006 // execute the vectorized epilogue skip to the scalar part. 8007 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8008 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8009 LoopVectorPreHeader = 8010 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8011 LI, nullptr, "vec.epilog.ph"); 8012 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8013 VecEpilogueIterationCountCheck); 8014 8015 // Adjust the control flow taking the state info from the main loop 8016 // vectorization into account. 
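  // In particular, the main loop's iteration-count check must now branch to
  // the new epilogue preheader instead of the old one, while the checks that
  // bypassed the main vector loop should branch directly to the scalar
  // preheader.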
8017 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8018 "expected this to be saved from the previous pass."); 8019 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8020 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8021 8022 DT->changeImmediateDominator(LoopVectorPreHeader, 8023 EPI.MainLoopIterationCountCheck); 8024 8025 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8026 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8027 8028 if (EPI.SCEVSafetyCheck) 8029 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8030 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8031 if (EPI.MemSafetyCheck) 8032 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8033 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8034 8035 DT->changeImmediateDominator( 8036 VecEpilogueIterationCountCheck, 8037 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8038 8039 DT->changeImmediateDominator(LoopScalarPreHeader, 8040 EPI.EpilogueIterationCountCheck); 8041 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 8042 8043 // Keep track of bypass blocks, as they feed start values to the induction 8044 // phis in the scalar loop preheader. 8045 if (EPI.SCEVSafetyCheck) 8046 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8047 if (EPI.MemSafetyCheck) 8048 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8049 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8050 8051 // Generate a resume induction for the vector epilogue and put it in the 8052 // vector epilogue preheader 8053 Type *IdxTy = Legal->getWidestInductionType(); 8054 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8055 LoopVectorPreHeader->getFirstNonPHI()); 8056 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8057 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8058 EPI.MainLoopIterationCountCheck); 8059 8060 // Generate the induction variable. 8061 OldInduction = Legal->getPrimaryInduction(); 8062 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8063 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8064 Value *StartIdx = EPResumeVal; 8065 Induction = 8066 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8067 getDebugLocFromInstOrOperands(OldInduction)); 8068 8069 // Generate induction resume values. These variables save the new starting 8070 // indexes for the scalar loop. They are used to test if there are any tail 8071 // iterations left once the vector loop has completed. 8072 // Note that when the vectorized epilogue is skipped due to iteration count 8073 // check, then the resume value for the induction variable comes from 8074 // the trip count of the main vector loop, hence passing the AdditionalBypass 8075 // argument. 
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of the
  // vector epilogue loop.
  auto P =
      Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count,
      ConstantInt::get(Count->getType(),
                       EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
      "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
8168 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8169 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8170 if (ECEntryIt != EdgeMaskCache.end()) 8171 return ECEntryIt->second; 8172 8173 VPValue *SrcMask = createBlockInMask(Src, Plan); 8174 8175 // The terminator has to be a branch inst! 8176 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8177 assert(BI && "Unexpected terminator found"); 8178 8179 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8180 return EdgeMaskCache[Edge] = SrcMask; 8181 8182 // If source is an exiting block, we know the exit edge is dynamically dead 8183 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8184 // adding uses of an otherwise potentially dead instruction. 8185 if (OrigLoop->isLoopExiting(Src)) 8186 return EdgeMaskCache[Edge] = SrcMask; 8187 8188 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8189 assert(EdgeMask && "No Edge Mask found for condition"); 8190 8191 if (BI->getSuccessor(0) != Dst) 8192 EdgeMask = Builder.createNot(EdgeMask); 8193 8194 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 8195 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 8196 8197 return EdgeMaskCache[Edge] = EdgeMask; 8198 } 8199 8200 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8201 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8202 8203 // Look for cached value. 8204 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8205 if (BCEntryIt != BlockMaskCache.end()) 8206 return BCEntryIt->second; 8207 8208 // All-one mask is modelled as no-mask following the convention for masked 8209 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8210 VPValue *BlockMask = nullptr; 8211 8212 if (OrigLoop->getHeader() == BB) { 8213 if (!CM.blockNeedsPredication(BB)) 8214 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8215 8216 // Create the block in mask as the first non-phi instruction in the block. 8217 VPBuilder::InsertPointGuard Guard(Builder); 8218 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8219 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8220 8221 // Introduce the early-exit compare IV <= BTC to form header block mask. 8222 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8223 // Start by constructing the desired canonical IV. 8224 VPValue *IV = nullptr; 8225 if (Legal->getPrimaryInduction()) 8226 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8227 else { 8228 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8229 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8230 IV = IVRecipe->getVPValue(); 8231 } 8232 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8233 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8234 8235 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8236 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8237 // as a second argument, we only pass the IV here and extract the 8238 // tripcount from the transform state where codegen of the VP instructions 8239 // happen. 8240 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8241 } else { 8242 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8243 } 8244 return BlockMaskCache[BB] = BlockMask; 8245 } 8246 8247 // This is the block mask. We OR all incoming edges. 
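  // A null edge mask means that edge is taken unconditionally, in which case
  // the block mask collapses to all-one (also modelled as null) and the scan
  // below can stop early.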
8248 for (auto *Predecessor : predecessors(BB)) { 8249 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8250 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8251 return BlockMaskCache[BB] = EdgeMask; 8252 8253 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8254 BlockMask = EdgeMask; 8255 continue; 8256 } 8257 8258 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8259 } 8260 8261 return BlockMaskCache[BB] = BlockMask; 8262 } 8263 8264 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 8265 VPlanPtr &Plan) { 8266 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8267 "Must be called with either a load or store"); 8268 8269 auto willWiden = [&](ElementCount VF) -> bool { 8270 if (VF.isScalar()) 8271 return false; 8272 LoopVectorizationCostModel::InstWidening Decision = 8273 CM.getWideningDecision(I, VF); 8274 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8275 "CM decision should be taken at this point."); 8276 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8277 return true; 8278 if (CM.isScalarAfterVectorization(I, VF) || 8279 CM.isProfitableToScalarize(I, VF)) 8280 return false; 8281 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8282 }; 8283 8284 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8285 return nullptr; 8286 8287 VPValue *Mask = nullptr; 8288 if (Legal->isMaskRequired(I)) 8289 Mask = createBlockInMask(I->getParent(), Plan); 8290 8291 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 8292 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8293 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 8294 8295 StoreInst *Store = cast<StoreInst>(I); 8296 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 8297 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 8298 } 8299 8300 VPWidenIntOrFpInductionRecipe * 8301 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const { 8302 // Check if this is an integer or fp induction. If so, build the recipe that 8303 // produces its scalar and vector values. 8304 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8305 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8306 II.getKind() == InductionDescriptor::IK_FpInduction) { 8307 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8308 return new VPWidenIntOrFpInductionRecipe(Phi, Start); 8309 } 8310 8311 return nullptr; 8312 } 8313 8314 VPWidenIntOrFpInductionRecipe * 8315 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range, 8316 VPlan &Plan) const { 8317 // Optimize the special case where the source is a constant integer 8318 // induction variable. Notice that we can only optimize the 'trunc' case 8319 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8320 // (c) other casts depend on pointer size. 8321 8322 // Determine whether \p K is a truncation based on an induction variable that 8323 // can be optimized. 
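  // The lambda below builds a per-VF predicate that getDecisionAndClampRange
  // evaluates over {Range.Start, 2 * Range.Start, 4 * Range.Start, ...},
  // clamping Range.End at the first VF whose answer differs from the answer at
  // Range.Start. Illustrative sketch with plain integers (hypothetical names,
  // not the ElementCount API):
  //   bool AtStart = Pred(Start);
  //   for (unsigned VF = Start * 2; VF < End; VF *= 2)
  //     if (Pred(VF) != AtStart) { End = VF; break; }
  //   return AtStart;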
8324 auto isOptimizableIVTruncate = 8325 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8326 return [=](ElementCount VF) -> bool { 8327 return CM.isOptimizableIVTruncate(K, VF); 8328 }; 8329 }; 8330 8331 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8332 isOptimizableIVTruncate(I), Range)) { 8333 8334 InductionDescriptor II = 8335 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8336 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8337 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8338 Start, I); 8339 } 8340 return nullptr; 8341 } 8342 8343 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 8344 // We know that all PHIs in non-header blocks are converted into selects, so 8345 // we don't have to worry about the insertion order and we can just use the 8346 // builder. At this point we generate the predication tree. There may be 8347 // duplications since this is a simple recursive scan, but future 8348 // optimizations will clean it up. 8349 8350 SmallVector<VPValue *, 2> Operands; 8351 unsigned NumIncoming = Phi->getNumIncomingValues(); 8352 for (unsigned In = 0; In < NumIncoming; In++) { 8353 VPValue *EdgeMask = 8354 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8355 assert((EdgeMask || NumIncoming == 1) && 8356 "Multiple predecessors with one having a full mask"); 8357 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 8358 if (EdgeMask) 8359 Operands.push_back(EdgeMask); 8360 } 8361 return new VPBlendRecipe(Phi, Operands); 8362 } 8363 8364 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 8365 VPlan &Plan) const { 8366 8367 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8368 [this, CI](ElementCount VF) { 8369 return CM.isScalarWithPredication(CI, VF); 8370 }, 8371 Range); 8372 8373 if (IsPredicated) 8374 return nullptr; 8375 8376 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8377 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8378 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8379 ID == Intrinsic::pseudoprobe || 8380 ID == Intrinsic::experimental_noalias_scope_decl)) 8381 return nullptr; 8382 8383 auto willWiden = [&](ElementCount VF) -> bool { 8384 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8385 // The following case may be scalarized depending on the VF. 8386 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8387 // version of the instruction. 8388 // Is it beneficial to perform intrinsic call compared to lib call? 8389 bool NeedToScalarize = false; 8390 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8391 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8392 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8393 assert(IntrinsicCost.isValid() && CallCost.isValid() && 8394 "Cannot have invalid costs while widening"); 8395 return UseVectorIntrinsic || !NeedToScalarize; 8396 }; 8397 8398 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8399 return nullptr; 8400 8401 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 8402 } 8403 8404 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8405 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8406 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8407 // Instruction should be widened, unless it is scalar after vectorization, 8408 // scalarization is profitable or it is predicated. 8409 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8410 return CM.isScalarAfterVectorization(I, VF) || 8411 CM.isProfitableToScalarize(I, VF) || 8412 CM.isScalarWithPredication(I, VF); 8413 }; 8414 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8415 Range); 8416 } 8417 8418 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 8419 auto IsVectorizableOpcode = [](unsigned Opcode) { 8420 switch (Opcode) { 8421 case Instruction::Add: 8422 case Instruction::And: 8423 case Instruction::AShr: 8424 case Instruction::BitCast: 8425 case Instruction::FAdd: 8426 case Instruction::FCmp: 8427 case Instruction::FDiv: 8428 case Instruction::FMul: 8429 case Instruction::FNeg: 8430 case Instruction::FPExt: 8431 case Instruction::FPToSI: 8432 case Instruction::FPToUI: 8433 case Instruction::FPTrunc: 8434 case Instruction::FRem: 8435 case Instruction::FSub: 8436 case Instruction::ICmp: 8437 case Instruction::IntToPtr: 8438 case Instruction::LShr: 8439 case Instruction::Mul: 8440 case Instruction::Or: 8441 case Instruction::PtrToInt: 8442 case Instruction::SDiv: 8443 case Instruction::Select: 8444 case Instruction::SExt: 8445 case Instruction::Shl: 8446 case Instruction::SIToFP: 8447 case Instruction::SRem: 8448 case Instruction::Sub: 8449 case Instruction::Trunc: 8450 case Instruction::UDiv: 8451 case Instruction::UIToFP: 8452 case Instruction::URem: 8453 case Instruction::Xor: 8454 case Instruction::ZExt: 8455 return true; 8456 } 8457 return false; 8458 }; 8459 8460 if (!IsVectorizableOpcode(I->getOpcode())) 8461 return nullptr; 8462 8463 // Success: widen this instruction. 8464 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 8465 } 8466 8467 VPBasicBlock *VPRecipeBuilder::handleReplication( 8468 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8469 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8470 VPlanPtr &Plan) { 8471 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8472 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8473 Range); 8474 8475 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8476 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8477 Range); 8478 8479 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8480 IsUniform, IsPredicated); 8481 setRecipe(I, Recipe); 8482 Plan->addVPValue(I, Recipe); 8483 8484 // Find if I uses a predicated instruction. If so, it will use its scalar 8485 // value. Avoid hoisting the insert-element which packs the scalar value into 8486 // a vector value, as that happens iff all users use the vector value. 
8487 for (auto &Op : I->operands()) 8488 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8489 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8490 PredInst2Recipe[PredInst]->setAlsoPack(false); 8491 8492 // Finalize the recipe for Instr, first if it is not predicated. 8493 if (!IsPredicated) { 8494 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8495 VPBB->appendRecipe(Recipe); 8496 return VPBB; 8497 } 8498 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8499 assert(VPBB->getSuccessors().empty() && 8500 "VPBB has successors when handling predicated replication."); 8501 // Record predicated instructions for above packing optimizations. 8502 PredInst2Recipe[I] = Recipe; 8503 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8504 VPBlockUtils::insertBlockAfter(Region, VPBB); 8505 auto *RegSucc = new VPBasicBlock(); 8506 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8507 return RegSucc; 8508 } 8509 8510 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8511 VPRecipeBase *PredRecipe, 8512 VPlanPtr &Plan) { 8513 // Instructions marked for predication are replicated and placed under an 8514 // if-then construct to prevent side-effects. 8515 8516 // Generate recipes to compute the block mask for this region. 8517 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8518 8519 // Build the triangular if-then region. 8520 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8521 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8522 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8523 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8524 auto *PHIRecipe = Instr->getType()->isVoidTy() 8525 ? nullptr 8526 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8527 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8528 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8529 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8530 8531 // Note: first set Entry as region entry and then connect successors starting 8532 // from it in order, to propagate the "parent" of each VPBasicBlock. 8533 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8534 VPBlockUtils::connectBlocks(Pred, Exit); 8535 8536 return Region; 8537 } 8538 8539 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8540 VFRange &Range, 8541 VPlanPtr &Plan) { 8542 // First, check for specific widening recipes that deal with calls, memory 8543 // operations, inductions and Phi nodes. 
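  // Rough dispatch order of the code below: calls, then loads/stores, then
  // phis (blends for non-header phis; induction, reduction or plain widened
  // phis for header phis), then optimizable induction truncates, and finally
  // generic widening via shouldWiden()/tryToWiden(). The first recipe
  // successfully created is returned; a nullptr result means the instruction
  // will be replicated instead.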
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return tryToWidenCall(CI, Range, *Plan);

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return tryToWidenMemory(Instr, Range, Plan);

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
      return Recipe;

    if (Legal->isReductionVariable(Phi)) {
      RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
      VPValue *StartV =
          Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
      return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
    }

    return new VPWidenPHIRecipe(Phi);
  }

  if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
                                    cast<TruncInst>(Instr), Range, *Plan)))
    return Recipe;

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
                                OrigLoop);

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
                                   InvariantCond);
  }

  return tryToWiden(Instr, *Plan);
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially dead
  // in the vectorized loop. We don't need to vectorize these instructions. For
  // example, original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const DenseMap<Instruction *, Instruction *> &SinkAfter) {

  // Hold a mapping from predicated instructions to their recipes, in order to
  // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of vector value.
  DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and add
  // placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
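  // (The dummy block only gives insertBlockAfter an anchor while recipes are
  //  created; it is disconnected and deleted again once the real entry block
  //  is known, see the clean-up right after the RPO walk below.)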
  auto Plan = std::make_unique<VPlan>();
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  Plan->setEntry(VPBB);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      if (auto Recipe =
              RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, Instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
          Instr, Range, VPBB, PredInst2Recipe, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
  // basic-blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
    // If the target is in a replication region, make sure to move Sink to the
    // block after it, not into the replication region itself.
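    // Illustrative layout (sketch): the if-then region built by
    // createReplicateRegion looks like
    //   [pred.<op>.entry] -> [pred.<op>.if: Target] -> [pred.<op>.continue]
    // and Sink is moved to the beginning of the block that follows the whole
    // region, not next to Target inside it.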
8757 if (auto *Region = 8758 dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) { 8759 if (Region->isReplicator()) { 8760 assert(Region->getNumSuccessors() == 1 && "Expected SESE region!"); 8761 VPBasicBlock *NextBlock = 8762 cast<VPBasicBlock>(Region->getSuccessors().front()); 8763 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8764 continue; 8765 } 8766 } 8767 Sink->moveAfter(Target); 8768 } 8769 8770 // Interleave memory: for each Interleave Group we marked earlier as relevant 8771 // for this VPlan, replace the Recipes widening its memory instructions with a 8772 // single VPInterleaveRecipe at its insertion point. 8773 for (auto IG : InterleaveGroups) { 8774 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8775 RecipeBuilder.getRecipe(IG->getInsertPos())); 8776 SmallVector<VPValue *, 4> StoredValues; 8777 for (unsigned i = 0; i < IG->getFactor(); ++i) 8778 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 8779 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 8780 8781 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8782 Recipe->getMask()); 8783 VPIG->insertBefore(Recipe); 8784 unsigned J = 0; 8785 for (unsigned i = 0; i < IG->getFactor(); ++i) 8786 if (Instruction *Member = IG->getMember(i)) { 8787 if (!Member->getType()->isVoidTy()) { 8788 VPValue *OriginalV = Plan->getVPValue(Member); 8789 Plan->removeVPValueFor(Member); 8790 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8791 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8792 J++; 8793 } 8794 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 8795 } 8796 } 8797 8798 // Adjust the recipes for any inloop reductions. 8799 if (Range.Start.isVector()) 8800 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 8801 8802 // Finally, if tail is folded by masking, introduce selects between the phi 8803 // and the live-out instruction of each reduction, at the end of the latch. 8804 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 8805 Builder.setInsertPoint(VPBB); 8806 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 8807 for (auto &Reduction : Legal->getReductionVars()) { 8808 if (CM.isInLoopReduction(Reduction.first)) 8809 continue; 8810 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 8811 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 8812 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 8813 } 8814 } 8815 8816 std::string PlanName; 8817 raw_string_ostream RSO(PlanName); 8818 ElementCount VF = Range.Start; 8819 Plan->addVF(VF); 8820 RSO << "Initial VPlan for VF={" << VF; 8821 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 8822 Plan->addVF(VF); 8823 RSO << "," << VF; 8824 } 8825 RSO << "},UF>=1"; 8826 RSO.flush(); 8827 Plan->setName(PlanName); 8828 8829 return Plan; 8830 } 8831 8832 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8833 // Outer loop handling: They may require CFG and instruction level 8834 // transformations before even evaluating whether vectorization is profitable. 8835 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8836 // the vectorization pipeline. 
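  // In outline (a sketch of the steps below): build the hierarchical CFG for
  // the loop nest, record the candidate VFs, optionally run the VPlan
  // predicator (returning early in that case), and otherwise lower
  // VPInstructions to VPRecipes.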
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
  return Plan;
}

// Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
    VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert(isa<VPWidenRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ?
RecipeBuilder.createBlockInMask(R->getParent(), Plan) 8903 : nullptr; 8904 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 8905 &RdxDesc, R, ChainOp, VecOp, CondOp, Legal->hasFunNoNaNAttr(), TTI); 8906 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe); 8907 Plan->removeVPValueFor(R); 8908 Plan->addVPValue(R, RedRecipe); 8909 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 8910 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe); 8911 WidenRecipe->eraseFromParent(); 8912 8913 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 8914 VPRecipeBase *CompareRecipe = 8915 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 8916 assert(isa<VPWidenRecipe>(CompareRecipe) && 8917 "Expected to replace a VPWidenSC"); 8918 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 8919 "Expected no remaining users"); 8920 CompareRecipe->eraseFromParent(); 8921 } 8922 Chain = R; 8923 } 8924 } 8925 } 8926 8927 Value* LoopVectorizationPlanner::VPCallbackILV:: 8928 getOrCreateVectorValues(Value *V, unsigned Part) { 8929 return ILV.getOrCreateVectorValue(V, Part); 8930 } 8931 8932 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue( 8933 Value *V, const VPIteration &Instance) { 8934 return ILV.getOrCreateScalarValue(V, Instance); 8935 } 8936 8937 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 8938 VPSlotTracker &SlotTracker) const { 8939 O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 8940 IG->getInsertPos()->printAsOperand(O, false); 8941 O << ", "; 8942 getAddr()->printAsOperand(O, SlotTracker); 8943 VPValue *Mask = getMask(); 8944 if (Mask) { 8945 O << ", "; 8946 Mask->printAsOperand(O, SlotTracker); 8947 } 8948 for (unsigned i = 0; i < IG->getFactor(); ++i) 8949 if (Instruction *I = IG->getMember(i)) 8950 O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i; 8951 } 8952 8953 void VPWidenCallRecipe::execute(VPTransformState &State) { 8954 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 8955 *this, State); 8956 } 8957 8958 void VPWidenSelectRecipe::execute(VPTransformState &State) { 8959 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 8960 this, *this, InvariantCond, State); 8961 } 8962 8963 void VPWidenRecipe::execute(VPTransformState &State) { 8964 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 8965 } 8966 8967 void VPWidenGEPRecipe::execute(VPTransformState &State) { 8968 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 8969 *this, State.UF, State.VF, IsPtrLoopInvariant, 8970 IsIndexLoopInvariant, State); 8971 } 8972 8973 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 8974 assert(!State.Instance && "Int or FP induction being replicated."); 8975 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 8976 Trunc); 8977 } 8978 8979 void VPWidenPHIRecipe::execute(VPTransformState &State) { 8980 Value *StartV = 8981 getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr; 8982 State.ILV->widenPHIInstruction(Phi, RdxDesc, StartV, State.UF, State.VF); 8983 } 8984 8985 void VPBlendRecipe::execute(VPTransformState &State) { 8986 State.ILV->setDebugLocFromInst(State.Builder, Phi); 8987 // We know that all PHIs in non-header blocks are converted into 8988 // selects, so we don't have to worry about the insertion order and we 8989 // can just use the builder. 8990 // At this point we generate the predication tree. 
There may be 8991 // duplications since this is a simple recursive scan, but future 8992 // optimizations will clean it up. 8993 8994 unsigned NumIncoming = getNumIncomingValues(); 8995 8996 // Generate a sequence of selects of the form: 8997 // SELECT(Mask3, In3, 8998 // SELECT(Mask2, In2, 8999 // SELECT(Mask1, In1, 9000 // In0))) 9001 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9002 // are essentially undef are taken from In0. 9003 InnerLoopVectorizer::VectorParts Entry(State.UF); 9004 for (unsigned In = 0; In < NumIncoming; ++In) { 9005 for (unsigned Part = 0; Part < State.UF; ++Part) { 9006 // We might have single edge PHIs (blocks) - use an identity 9007 // 'select' for the first PHI operand. 9008 Value *In0 = State.get(getIncomingValue(In), Part); 9009 if (In == 0) 9010 Entry[Part] = In0; // Initialize with the first incoming value. 9011 else { 9012 // Select between the current value and the previous incoming edge 9013 // based on the incoming mask. 9014 Value *Cond = State.get(getMask(In), Part); 9015 Entry[Part] = 9016 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9017 } 9018 } 9019 } 9020 for (unsigned Part = 0; Part < State.UF; ++Part) 9021 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 9022 } 9023 9024 void VPInterleaveRecipe::execute(VPTransformState &State) { 9025 assert(!State.Instance && "Interleave group being replicated."); 9026 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9027 getStoredValues(), getMask()); 9028 } 9029 9030 void VPReductionRecipe::execute(VPTransformState &State) { 9031 assert(!State.Instance && "Reduction being replicated."); 9032 for (unsigned Part = 0; Part < State.UF; ++Part) { 9033 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9034 Value *NewVecOp = State.get(getVecOp(), Part); 9035 if (VPValue *Cond = getCondOp()) { 9036 Value *NewCond = State.get(Cond, Part); 9037 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9038 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 9039 Kind, VecTy->getElementType()); 9040 Constant *IdenVec = 9041 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 9042 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9043 NewVecOp = Select; 9044 } 9045 Value *NewRed = 9046 createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9047 Value *PrevInChain = State.get(getChainOp(), Part); 9048 Value *NextInChain; 9049 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9050 NextInChain = 9051 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9052 NewRed, PrevInChain); 9053 } else { 9054 NextInChain = State.Builder.CreateBinOp( 9055 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 9056 PrevInChain); 9057 } 9058 State.set(this, getUnderlyingInstr(), NextInChain, Part); 9059 } 9060 } 9061 9062 void VPReplicateRecipe::execute(VPTransformState &State) { 9063 if (State.Instance) { // Generate a single instance. 9064 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9065 State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this, 9066 *State.Instance, IsPredicated, State); 9067 // Insert scalar instance packing it into a vector. 9068 if (AlsoPack && State.VF.isVector()) { 9069 // If we're constructing lane 0, initialize to start from poison. 
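      // The per-part vector is then filled in lane by lane, roughly as in this
      // IR sketch (types and value names are illustrative only):
      //   %v0 = insertelement <VF x T> poison, T %lane0, i32 0
      //   %v1 = insertelement <VF x T> %v0,    T %lane1, i32 1
      //   ...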
      if (State.Instance->Lane == 0) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.ValueMap.setVectorValue(getUnderlyingInstr(),
                                      State.Instance->Part, Poison);
      }
      State.ILV->packScalarIntoVectorValue(getUnderlyingInstr(),
                                           *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform in which case generate only the first lane for each
  // of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this, {Part, Lane},
                                      IsPredicated, State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional branch,
  // whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node: if
  // a vector value for the predicated instruction exists at this point it means
  // the instruction has vector users only, and a phi for the vector value is
  // needed. In this case the recipe of the predicated instruction is marked to
  // also do that packing, thereby "hoisting" the insert-element sequence.
  // Otherwise, a phi node for the scalar value is needed.
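  // For the scalar case the phi created below ends up looking roughly like
  // this (illustrative IR sketch; block and value names are hypothetical):
  //   %r = phi T [ poison, %predicating.block ], [ %scalar.inst, %predicated.block ]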
9135 unsigned Part = State.Instance->Part; 9136 Instruction *PredInst = 9137 cast<Instruction>(getOperand(0)->getUnderlyingValue()); 9138 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 9139 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 9140 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9141 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9142 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9143 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9144 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 9145 } else { 9146 Type *PredInstType = PredInst->getType(); 9147 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9148 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), PredicatingBB); 9149 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9150 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 9151 } 9152 } 9153 9154 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9155 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9156 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 9157 StoredValue ? nullptr : getVPValue(), 9158 getAddr(), StoredValue, getMask()); 9159 } 9160 9161 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9162 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9163 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9164 // for predication. 9165 static ScalarEpilogueLowering getScalarEpilogueLowering( 9166 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9167 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9168 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9169 LoopVectorizationLegality &LVL) { 9170 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9171 // don't look at hints or options, and don't request a scalar epilogue. 9172 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9173 // LoopAccessInfo (due to code dependency and not being able to reliably get 9174 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9175 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9176 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9177 // back to the old way and vectorize with versioning when forced. See D81345.) 
9178 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9179 PGSOQueryType::IRPass) && 9180 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9181 return CM_ScalarEpilogueNotAllowedOptSize; 9182 9183 // 2) If set, obey the directives 9184 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9185 switch (PreferPredicateOverEpilogue) { 9186 case PreferPredicateTy::ScalarEpilogue: 9187 return CM_ScalarEpilogueAllowed; 9188 case PreferPredicateTy::PredicateElseScalarEpilogue: 9189 return CM_ScalarEpilogueNotNeededUsePredicate; 9190 case PreferPredicateTy::PredicateOrDontVectorize: 9191 return CM_ScalarEpilogueNotAllowedUsePredicate; 9192 }; 9193 } 9194 9195 // 3) If set, obey the hints 9196 switch (Hints.getPredicate()) { 9197 case LoopVectorizeHints::FK_Enabled: 9198 return CM_ScalarEpilogueNotNeededUsePredicate; 9199 case LoopVectorizeHints::FK_Disabled: 9200 return CM_ScalarEpilogueAllowed; 9201 }; 9202 9203 // 4) if the TTI hook indicates this is profitable, request predication. 9204 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9205 LVL.getLAI())) 9206 return CM_ScalarEpilogueNotNeededUsePredicate; 9207 9208 return CM_ScalarEpilogueAllowed; 9209 } 9210 9211 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V, 9212 unsigned Part) { 9213 set(Def, V, Part); 9214 ILV->setVectorValue(IRDef, Part, V); 9215 } 9216 9217 // Process the loop in the VPlan-native vectorization path. This path builds 9218 // VPlan upfront in the vectorization pipeline, which allows to apply 9219 // VPlan-to-VPlan transformations from the very beginning without modifying the 9220 // input LLVM IR. 9221 static bool processLoopInVPlanNativePath( 9222 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 9223 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 9224 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 9225 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 9226 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 9227 9228 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 9229 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 9230 return false; 9231 } 9232 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 9233 Function *F = L->getHeader()->getParent(); 9234 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 9235 9236 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9237 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 9238 9239 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 9240 &Hints, IAI); 9241 // Use the planner for outer loop vectorization. 9242 // TODO: CM is not used at this point inside the planner. Turn CM into an 9243 // optional argument if we don't need it in the future. 9244 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 9245 9246 // Get user vectorization factor. 9247 ElementCount UserVF = Hints.getWidth(); 9248 9249 // Plan how to best vectorize, return the best VF and its cost. 9250 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 9251 9252 // If we are stress testing VPlan builds, do not attempt to generate vector 9253 // code. Masked vector code generation support will follow soon. 9254 // Also, do not attempt to vectorize if no vector code will be produced. 
9255 if (VPlanBuildStressTest || EnableVPlanPredication || 9256 VectorizationFactor::Disabled() == VF) 9257 return false; 9258 9259 LVP.setBestPlan(VF.Width, 1); 9260 9261 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 9262 &CM, BFI, PSI); 9263 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 9264 << L->getHeader()->getParent()->getName() << "\"\n"); 9265 LVP.executePlan(LB, DT); 9266 9267 // Mark the loop as already vectorized to avoid vectorizing again. 9268 Hints.setAlreadyVectorized(); 9269 9270 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9271 return true; 9272 } 9273 9274 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 9275 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 9276 !EnableLoopInterleaving), 9277 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 9278 !EnableLoopVectorization) {} 9279 9280 bool LoopVectorizePass::processLoop(Loop *L) { 9281 assert((EnableVPlanNativePath || L->isInnermost()) && 9282 "VPlan-native path is not enabled. Only process inner loops."); 9283 9284 #ifndef NDEBUG 9285 const std::string DebugLocStr = getDebugLocString(L); 9286 #endif /* NDEBUG */ 9287 9288 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 9289 << L->getHeader()->getParent()->getName() << "\" from " 9290 << DebugLocStr << "\n"); 9291 9292 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 9293 9294 LLVM_DEBUG( 9295 dbgs() << "LV: Loop hints:" 9296 << " force=" 9297 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 9298 ? "disabled" 9299 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 9300 ? "enabled" 9301 : "?")) 9302 << " width=" << Hints.getWidth() 9303 << " unroll=" << Hints.getInterleave() << "\n"); 9304 9305 // Function containing loop 9306 Function *F = L->getHeader()->getParent(); 9307 9308 // Looking at the diagnostic output is the only way to determine if a loop 9309 // was vectorized (other than looking at the IR or machine code), so it 9310 // is important to generate an optimization remark for each loop. Most of 9311 // these messages are generated as OptimizationRemarkAnalysis. Remarks 9312 // generated as OptimizationRemark and OptimizationRemarkMissed are 9313 // less verbose reporting vectorized loops and unvectorized loops that may 9314 // benefit from vectorization, respectively. 9315 9316 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 9317 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 9318 return false; 9319 } 9320 9321 PredicatedScalarEvolution PSE(*SE, *L); 9322 9323 // Check if it is legal to vectorize the loop. 9324 LoopVectorizationRequirements Requirements(*ORE); 9325 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 9326 &Requirements, &Hints, DB, AC, BFI, PSI); 9327 if (!LVL.canVectorize(EnableVPlanNativePath)) { 9328 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 9329 Hints.emitRemarkWithHints(); 9330 return false; 9331 } 9332 9333 // Check the function attributes and profiles to find out if this function 9334 // should be optimized for size. 9335 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9336 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 9337 9338 // Entrance to the VPlan-native vectorization path. Outer loops are processed 9339 // here. They may require CFG and instruction level transformations before 9340 // even evaluating whether vectorization is profitable. 
Since we cannot modify 9341 // the incoming IR, we need to build VPlan upfront in the vectorization 9342 // pipeline. 9343 if (!L->isInnermost()) 9344 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 9345 ORE, BFI, PSI, Hints); 9346 9347 assert(L->isInnermost() && "Inner loop expected."); 9348 9349 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 9350 // count by optimizing for size, to minimize overheads. 9351 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 9352 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 9353 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 9354 << "This loop is worth vectorizing only if no scalar " 9355 << "iteration overheads are incurred."); 9356 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 9357 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 9358 else { 9359 LLVM_DEBUG(dbgs() << "\n"); 9360 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 9361 } 9362 } 9363 9364 // Check the function attributes to see if implicit floats are allowed. 9365 // FIXME: This check doesn't seem possibly correct -- what if the loop is 9366 // an integer loop and the vector instructions selected are purely integer 9367 // vector instructions? 9368 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 9369 reportVectorizationFailure( 9370 "Can't vectorize when the NoImplicitFloat attribute is used", 9371 "loop not vectorized due to NoImplicitFloat attribute", 9372 "NoImplicitFloat", ORE, L); 9373 Hints.emitRemarkWithHints(); 9374 return false; 9375 } 9376 9377 // Check if the target supports potentially unsafe FP vectorization. 9378 // FIXME: Add a check for the type of safety issue (denormal, signaling) 9379 // for the target we're vectorizing for, to make sure none of the 9380 // additional fp-math flags can help. 9381 if (Hints.isPotentiallyUnsafe() && 9382 TTI->isFPVectorizationPotentiallyUnsafe()) { 9383 reportVectorizationFailure( 9384 "Potentially unsafe FP op prevents vectorization", 9385 "loop not vectorized due to unsafe FP support.", 9386 "UnsafeFP", ORE, L); 9387 Hints.emitRemarkWithHints(); 9388 return false; 9389 } 9390 9391 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 9392 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 9393 9394 // If an override option has been passed in for interleaved accesses, use it. 9395 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 9396 UseInterleaved = EnableInterleavedMemAccesses; 9397 9398 // Analyze interleaved memory accesses. 9399 if (UseInterleaved) { 9400 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 9401 } 9402 9403 // Use the cost model. 9404 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 9405 F, &Hints, IAI); 9406 CM.collectValuesToIgnore(); 9407 9408 // Use the planner for vectorization. 9409 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE); 9410 9411 // Get user vectorization factor and interleave count. 9412 ElementCount UserVF = Hints.getWidth(); 9413 unsigned UserIC = Hints.getInterleave(); 9414 9415 // Plan how to best vectorize, return the best VF and its cost. 9416 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 9417 9418 VectorizationFactor VF = VectorizationFactor::Disabled(); 9419 unsigned IC = 1; 9420 9421 if (MaybeVF) { 9422 VF = *MaybeVF; 9423 // Select the interleave count. 
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
9484 ORE->emit([&]() { 9485 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 9486 L->getStartLoc(), L->getHeader()) 9487 << VecDiagMsg.second; 9488 }); 9489 ORE->emit([&]() { 9490 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 9491 L->getStartLoc(), L->getHeader()) 9492 << IntDiagMsg.second; 9493 }); 9494 return false; 9495 } else if (!VectorizeLoop && InterleaveLoop) { 9496 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 9497 ORE->emit([&]() { 9498 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 9499 L->getStartLoc(), L->getHeader()) 9500 << VecDiagMsg.second; 9501 }); 9502 } else if (VectorizeLoop && !InterleaveLoop) { 9503 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 9504 << ") in " << DebugLocStr << '\n'); 9505 ORE->emit([&]() { 9506 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 9507 L->getStartLoc(), L->getHeader()) 9508 << IntDiagMsg.second; 9509 }); 9510 } else if (VectorizeLoop && InterleaveLoop) { 9511 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 9512 << ") in " << DebugLocStr << '\n'); 9513 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 9514 } 9515 9516 LVP.setBestPlan(VF.Width, IC); 9517 9518 using namespace ore; 9519 bool DisableRuntimeUnroll = false; 9520 MDNode *OrigLoopID = L->getLoopID(); 9521 9522 if (!VectorizeLoop) { 9523 assert(IC > 1 && "interleave count should not be 1 or 0"); 9524 // If we decided that it is not legal to vectorize the loop, then 9525 // interleave it. 9526 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM, 9527 BFI, PSI); 9528 LVP.executePlan(Unroller, DT); 9529 9530 ORE->emit([&]() { 9531 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 9532 L->getHeader()) 9533 << "interleaved loop (interleaved count: " 9534 << NV("InterleaveCount", IC) << ")"; 9535 }); 9536 } else { 9537 // If we decided that it is *legal* to vectorize the loop, then do it. 9538 9539 // Consider vectorizing the epilogue too if it's profitable. 9540 VectorizationFactor EpilogueVF = 9541 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 9542 if (EpilogueVF.Width.isVector()) { 9543 9544 // The first pass vectorizes the main loop and creates a scalar epilogue 9545 // to be vectorized by executing the plan (potentially with a different 9546 // factor) again shortly afterwards. 9547 EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC, 9548 EpilogueVF.Width.getKnownMinValue(), 1); 9549 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI, 9550 &LVL, &CM, BFI, PSI); 9551 9552 LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF); 9553 LVP.executePlan(MainILV, DT); 9554 ++LoopsVectorized; 9555 9556 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 9557 formLCSSARecursively(*L, *DT, LI, SE); 9558 9559 // Second pass vectorizes the epilogue and adjusts the control flow 9560 // edges from the first pass. 
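      // (Note, a sketch of the hand-off: EPI.MainLoopVF/UF are re-pointed at
      // the epilogue factors here so the second pass emits the epilogue vector
      // loop, while the values saved by the first pass, e.g. EPI.TripCount and
      // EPI.VectorTripCount, are reused when wiring up the new
      // iteration-count checks.)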
9561 LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF); 9562 EPI.MainLoopVF = EPI.EpilogueVF; 9563 EPI.MainLoopUF = EPI.EpilogueUF; 9564 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 9565 ORE, EPI, &LVL, &CM, BFI, PSI); 9566 LVP.executePlan(EpilogILV, DT); 9567 ++LoopsEpilogueVectorized; 9568 9569 if (!MainILV.areSafetyChecksAdded()) 9570 DisableRuntimeUnroll = true; 9571 } else { 9572 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 9573 &LVL, &CM, BFI, PSI); 9574 LVP.executePlan(LB, DT); 9575 ++LoopsVectorized; 9576 9577 // Add metadata to disable runtime unrolling a scalar loop when there are 9578 // no runtime checks about strides and memory. A scalar loop that is 9579 // rarely used is not worth unrolling. 9580 if (!LB.areSafetyChecksAdded()) 9581 DisableRuntimeUnroll = true; 9582 } 9583 9584 // Report the vectorization decision. 9585 ORE->emit([&]() { 9586 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 9587 L->getHeader()) 9588 << "vectorized loop (vectorization width: " 9589 << NV("VectorizationFactor", VF.Width) 9590 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 9591 }); 9592 } 9593 9594 Optional<MDNode *> RemainderLoopID = 9595 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 9596 LLVMLoopVectorizeFollowupEpilogue}); 9597 if (RemainderLoopID.hasValue()) { 9598 L->setLoopID(RemainderLoopID.getValue()); 9599 } else { 9600 if (DisableRuntimeUnroll) 9601 AddRuntimeUnrollDisableMetaData(L); 9602 9603 // Mark the loop as already vectorized to avoid vectorizing again. 9604 Hints.setAlreadyVectorized(); 9605 } 9606 9607 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9608 return true; 9609 } 9610 9611 LoopVectorizeResult LoopVectorizePass::runImpl( 9612 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 9613 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 9614 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 9615 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 9616 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 9617 SE = &SE_; 9618 LI = &LI_; 9619 TTI = &TTI_; 9620 DT = &DT_; 9621 BFI = &BFI_; 9622 TLI = TLI_; 9623 AA = &AA_; 9624 AC = &AC_; 9625 GetLAA = &GetLAA_; 9626 DB = &DB_; 9627 ORE = &ORE_; 9628 PSI = PSI_; 9629 9630 // Don't attempt if 9631 // 1. the target claims to have no vector registers, and 9632 // 2. interleaving won't help ILP. 9633 // 9634 // The second condition is necessary because, even if the target has no 9635 // vector registers, loop vectorization may still enable scalar 9636 // interleaving. 9637 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 9638 TTI->getMaxInterleaveFactor(1) < 2) 9639 return LoopVectorizeResult(false, false); 9640 9641 bool Changed = false, CFGChanged = false; 9642 9643 // The vectorizer requires loops to be in simplified form. 9644 // Since simplification may add new inner loops, it has to run before the 9645 // legality and profitability checks. This means running the loop vectorizer 9646 // will simplify all loops, regardless of whether anything end up being 9647 // vectorized. 9648 for (auto &L : *LI) 9649 Changed |= CFGChanged |= 9650 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 9651 9652 // Build up a worklist of inner-loops to vectorize. This is necessary as 9653 // the act of vectorizing or partially unrolling a loop creates new loops 9654 // and can invalidate iterators across the loops. 
9655 SmallVector<Loop *, 8> Worklist; 9656 9657 for (Loop *L : *LI) 9658 collectSupportedLoops(*L, LI, ORE, Worklist); 9659 9660 LoopsAnalyzed += Worklist.size(); 9661 9662 // Now walk the identified inner loops. 9663 while (!Worklist.empty()) { 9664 Loop *L = Worklist.pop_back_val(); 9665 9666 // For the inner loops we actually process, form LCSSA to simplify the 9667 // transform. 9668 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 9669 9670 Changed |= CFGChanged |= processLoop(L); 9671 } 9672 9673 // Process each loop nest in the function. 9674 return LoopVectorizeResult(Changed, CFGChanged); 9675 } 9676 9677 PreservedAnalyses LoopVectorizePass::run(Function &F, 9678 FunctionAnalysisManager &AM) { 9679 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 9680 auto &LI = AM.getResult<LoopAnalysis>(F); 9681 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 9682 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 9683 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 9684 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 9685 auto &AA = AM.getResult<AAManager>(F); 9686 auto &AC = AM.getResult<AssumptionAnalysis>(F); 9687 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 9688 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 9689 MemorySSA *MSSA = EnableMSSALoopDependency 9690 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 9691 : nullptr; 9692 9693 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 9694 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 9695 [&](Loop &L) -> const LoopAccessInfo & { 9696 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 9697 TLI, TTI, nullptr, MSSA}; 9698 return LAM.getResult<LoopAccessAnalysis>(L, AR); 9699 }; 9700 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 9701 ProfileSummaryInfo *PSI = 9702 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 9703 LoopVectorizeResult Result = 9704 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 9705 if (!Result.MadeAnyChange) 9706 return PreservedAnalyses::all(); 9707 PreservedAnalyses PA; 9708 9709 // We currently do not preserve loopinfo/dominator analyses with outer loop 9710 // vectorization. Until this is addressed, mark these analyses as preserved 9711 // only for non-VPlan-native path. 9712 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 9713 if (!EnableVPlanNativePath) { 9714 PA.preserve<LoopAnalysis>(); 9715 PA.preserve<DominatorTreeAnalysis>(); 9716 } 9717 PA.preserve<BasicAA>(); 9718 PA.preserve<GlobalsAA>(); 9719 if (!Result.MadeCFGChange) 9720 PA.preserveSet<CFGAnalyses>(); 9721 return PA; 9722 } 9723